LCOV - code coverage report
Current view: top level - plugins/gtpu - gtpu_encap.c (source / functions) Hit Total Coverage
Test: coverage-filtered.info Lines: 267 362 73.8 %
Date: 2023-07-05 22:20:52 Functions: 14 21 66.7 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2017 Intel and/or its affiliates.
       3             :  * Licensed under the Apache License, Version 2.0 (the "License");
       4             :  * you may not use this file except in compliance with the License.
       5             :  * You may obtain a copy of the License at:
       6             :  *
       7             :  *     http://www.apache.org/licenses/LICENSE-2.0
       8             :  *
       9             :  * Unless required by applicable law or agreed to in writing, software
      10             :  * distributed under the License is distributed on an "AS IS" BASIS,
      11             :  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      12             :  * See the License for the specific language governing permissions and
      13             :  * limitations under the License.
      14             :  */
      15             : #include <vppinfra/error.h>
      16             : #include <vppinfra/hash.h>
      17             : #include <vnet/vnet.h>
      18             : #include <vnet/ip/ip.h>
      19             : #include <vnet/ethernet/ethernet.h>
      20             : #include <gtpu/gtpu.h>
      21             : 
       22             : /* Statistics (not all errors) */
       23             : #define foreach_gtpu_encap_error    \
       24             : _(ENCAPSULATED, "good packets encapsulated")
       25             : 
                      : /* Human-readable counter names generated from the X-macro list above.
                      :  * Array indices parallel the gtpu_encap_error_t enum values, so the
                      :  * two must always be generated from the same foreach list. */
       26             : static char * gtpu_encap_error_strings[] = {
       27             : #define _(sym,string) string,
       28             :   foreach_gtpu_encap_error
       29             : #undef _
       30             : };
      31             : 
                      : /* Error/counter codes, generated from the same X-macro list as
                      :  * gtpu_encap_error_strings so indices stay in sync.
                      :  * GTPU_ENCAP_N_ERROR is a count sentinel, not a real error code. */
       32             : typedef enum {
       33             : #define _(sym,str) GTPU_ENCAP_ERROR_##sym,
       34             :     foreach_gtpu_encap_error
       35             : #undef _
       36             :     GTPU_ENCAP_N_ERROR,
       37             : } gtpu_encap_error_t;
      38             : 
                      : /* Next-node dispositions for the encap node: drop, or hand the newly
                      :  * encapsulated packet to the ip4/ip6 lookup node. */
       39             : #define foreach_gtpu_encap_next        \
       40             : _(DROP, "error-drop")                  \
       41             : _(IP4_LOOKUP, "ip4-lookup")             \
       42             : _(IP6_LOOKUP, "ip6-lookup")
       43             : 
                      : /* NOTE: the enumerator order below must match the foreach list above,
                      :  * since next-index values are assigned positionally.
                      :  * GTPU_ENCAP_N_NEXT is the count sentinel. */
       44             : typedef enum {
       45             :     GTPU_ENCAP_NEXT_DROP,
       46             :     GTPU_ENCAP_NEXT_IP4_LOOKUP,
       47             :     GTPU_ENCAP_NEXT_IP6_LOOKUP,
       48             :     GTPU_ENCAP_N_NEXT,
       49             : } gtpu_encap_next_t;
      50             : 
      51             : 
                      : /* u64-word offsets used to copy the fixed part of the rewrite string
                      :  * 8 bytes at a time. The IPv4 variant copies words 0..3 (32 octets;
                      :  * the remaining 4 octets are copied separately as a u32 — see the
                      :  * "Copy first 32 octets" section of gtpu_encap_inline). */
       52             : #define foreach_fixed_header4_offset            \
       53             :     _(0) _(1) _(2) _(3)
       54             : 
                      : /* IPv6 variant: words 0..6 (56 octets; see the "Copy first 56 (ip6)
                      :  * octets" section of gtpu_encap_inline). */
       55             : #define foreach_fixed_header6_offset            \
       56             :     _(0) _(1) _(2) _(3) _(4) _(5) _(6)
      57             : 
      58             : always_inline uword
      59           3 : gtpu_encap_inline (vlib_main_t * vm,
      60             :                     vlib_node_runtime_t * node,
      61             :                     vlib_frame_t * from_frame,
      62             :                     u32 is_ip4)
      63             : {
      64             :   u32 n_left_from, next_index, * from, * to_next;
      65           3 :   gtpu_main_t * gtm = &gtpu_main;
      66           3 :   vnet_main_t * vnm = gtm->vnet_main;
      67           3 :   vnet_interface_main_t * im = &vnm->interface_main;
      68           3 :   u32 pkts_encapsulated = 0;
      69           3 :   u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
      70           3 :   u32 thread_index = vlib_get_thread_index();
      71             :   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
      72           3 :   u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
      73           3 :   u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
      74             :   vnet_hw_interface_t * hi0, * hi1, * hi2, * hi3;
      75           3 :   gtpu_tunnel_t * t0 = NULL, * t1 = NULL, * t2 = NULL, * t3 = NULL;
      76             : 
      77           3 :   from = vlib_frame_vector_args (from_frame);
      78           3 :   n_left_from = from_frame->n_vectors;
      79             : 
      80           3 :   next_index = node->cached_next_index;
      81           3 :   stats_sw_if_index = node->runtime_data[0];
      82           3 :   stats_n_packets = stats_n_bytes = 0;
      83             : 
      84           6 :   while (n_left_from > 0)
      85             :     {
      86             :       u32 n_left_to_next;
      87             : 
      88           3 :       vlib_get_next_frame (vm, node, next_index,
      89             :                            to_next, n_left_to_next);
      90             : 
      91           4 :       while (n_left_from >= 8 && n_left_to_next >= 4)
      92             :         {
      93             :           u32 bi0, bi1, bi2, bi3;
      94             :           vlib_buffer_t * b0, * b1, * b2, * b3;
      95             :           u32 flow_hash0, flow_hash1, flow_hash2, flow_hash3;
      96             :           u32 len0, len1, len2, len3;
      97             :           ip4_header_t * ip4_0, * ip4_1, * ip4_2, * ip4_3;
      98             :           ip6_header_t * ip6_0, * ip6_1, * ip6_2, * ip6_3;
      99             :           udp_header_t * udp0, * udp1, * udp2, * udp3;
     100             :           gtpu_header_t * gtpu0, * gtpu1, * gtpu2, * gtpu3;
     101             :           u64 * copy_src0, * copy_dst0;
     102             :           u64 * copy_src1, * copy_dst1;
     103             :           u64 * copy_src2, * copy_dst2;
     104             :           u64 * copy_src3, * copy_dst3;
     105             :           u32 * copy_src_last0, * copy_dst_last0;
     106             :           u32 * copy_src_last1, * copy_dst_last1;
     107             :           u32 * copy_src_last2, * copy_dst_last2;
     108             :           u32 * copy_src_last3, * copy_dst_last3;
     109             :           u16 new_l0, new_l1, new_l2, new_l3;
     110             :           ip_csum_t sum0, sum1, sum2, sum3;
     111             : 
     112             :           /* Prefetch next iteration. */
     113             :           {
     114             :             vlib_buffer_t * p4, * p5, * p6, * p7;
     115             : 
     116           1 :             p4 = vlib_get_buffer (vm, from[4]);
     117           1 :             p5 = vlib_get_buffer (vm, from[5]);
     118           1 :             p6 = vlib_get_buffer (vm, from[6]);
     119           1 :             p7 = vlib_get_buffer (vm, from[7]);
     120             : 
     121           1 :             vlib_prefetch_buffer_header (p4, LOAD);
     122           1 :             vlib_prefetch_buffer_header (p5, LOAD);
     123           1 :             vlib_prefetch_buffer_header (p6, LOAD);
     124           1 :             vlib_prefetch_buffer_header (p7, LOAD);
     125             : 
     126           1 :             CLIB_PREFETCH (p4->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
     127           1 :             CLIB_PREFETCH (p5->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
     128           1 :             CLIB_PREFETCH (p6->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
     129           1 :             CLIB_PREFETCH (p7->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
     130             :           }
     131             : 
     132           1 :           bi0 = from[0];
     133           1 :           bi1 = from[1];
     134           1 :           bi2 = from[2];
     135           1 :           bi3 = from[3];
     136           1 :           to_next[0] = bi0;
     137           1 :           to_next[1] = bi1;
     138           1 :           to_next[2] = bi2;
     139           1 :           to_next[3] = bi3;
     140           1 :           from += 4;
     141           1 :           to_next += 4;
     142           1 :           n_left_to_next -= 4;
     143           1 :           n_left_from -= 4;
     144             : 
     145           1 :           b0 = vlib_get_buffer (vm, bi0);
     146           1 :           b1 = vlib_get_buffer (vm, bi1);
     147           1 :           b2 = vlib_get_buffer (vm, bi2);
     148           1 :           b3 = vlib_get_buffer (vm, bi3);
     149             : 
     150           1 :           flow_hash0 = vnet_l2_compute_flow_hash (b0);
     151           1 :           flow_hash1 = vnet_l2_compute_flow_hash (b1);
     152           1 :           flow_hash2 = vnet_l2_compute_flow_hash (b2);
     153           1 :           flow_hash3 = vnet_l2_compute_flow_hash (b3);
     154             : 
     155             :           /* Get next node index and adj index from tunnel next_dpo */
     156           1 :           sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
     157           1 :           sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
     158           1 :           sw_if_index2 = vnet_buffer(b2)->sw_if_index[VLIB_TX];
     159           1 :           sw_if_index3 = vnet_buffer(b3)->sw_if_index[VLIB_TX];
     160           1 :           hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
     161           1 :           hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
     162           1 :           hi2 = vnet_get_sup_hw_interface (vnm, sw_if_index2);
     163           1 :           hi3 = vnet_get_sup_hw_interface (vnm, sw_if_index3);
     164           1 :           t0 = &gtm->tunnels[hi0->dev_instance];
     165           1 :           t1 = &gtm->tunnels[hi1->dev_instance];
     166           1 :           t2 = &gtm->tunnels[hi2->dev_instance];
     167           1 :           t3 = &gtm->tunnels[hi3->dev_instance];
     168             : 
     169             :           /* Note: change to always set next0 if it may be set to drop */
     170           1 :           next0 = t0->next_dpo.dpoi_next_node;
     171           1 :           vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
     172           1 :           next1 = t1->next_dpo.dpoi_next_node;
     173           1 :           vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
     174           1 :           next2 = t2->next_dpo.dpoi_next_node;
     175           1 :           vnet_buffer(b2)->ip.adj_index[VLIB_TX] = t2->next_dpo.dpoi_index;
     176           1 :           next3 = t3->next_dpo.dpoi_next_node;
     177           1 :           vnet_buffer(b3)->ip.adj_index[VLIB_TX] = t3->next_dpo.dpoi_index;
     178             : 
     179             :           /* Apply the rewrite string. $$$$ vnet_rewrite? */
     180           1 :           vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
     181           1 :           vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
     182           1 :           vlib_buffer_advance (b2, -(word)_vec_len(t2->rewrite));
     183           1 :           vlib_buffer_advance (b3, -(word)_vec_len(t3->rewrite));
     184             : 
     185           1 :           if (is_ip4)
     186             :             {
     187           1 :               ip4_0 = vlib_buffer_get_current(b0);
     188           1 :               ip4_1 = vlib_buffer_get_current(b1);
     189           1 :               ip4_2 = vlib_buffer_get_current(b2);
     190           1 :               ip4_3 = vlib_buffer_get_current(b3);
     191             : 
     192             :               /* Copy the fixed header */
     193           1 :               copy_dst0 = (u64 *) ip4_0;
     194           1 :               copy_src0 = (u64 *) t0->rewrite;
     195           1 :               copy_dst1 = (u64 *) ip4_1;
     196           1 :               copy_src1 = (u64 *) t1->rewrite;
     197           1 :               copy_dst2 = (u64 *) ip4_2;
     198           1 :               copy_src2 = (u64 *) t2->rewrite;
     199           1 :               copy_dst3 = (u64 *) ip4_3;
     200           1 :               copy_src3 = (u64 *) t3->rewrite;
     201             : 
     202             :               /* Copy first 32 octets 8-bytes at a time */
     203             : #define _(offs) copy_dst0[offs] = copy_src0[offs];
     204           1 :               foreach_fixed_header4_offset;
     205             : #undef _
     206             : #define _(offs) copy_dst1[offs] = copy_src1[offs];
     207           1 :               foreach_fixed_header4_offset;
     208             : #undef _
     209             : #define _(offs) copy_dst2[offs] = copy_src2[offs];
     210           1 :               foreach_fixed_header4_offset;
     211             : #undef _
     212             : #define _(offs) copy_dst3[offs] = copy_src3[offs];
     213           1 :               foreach_fixed_header4_offset;
     214             : #undef _
     215             :               /* Last 4 octets. Hopefully gcc will be our friend */
     216           1 :               copy_dst_last0 = (u32 *)(&copy_dst0[4]);
     217           1 :               copy_src_last0 = (u32 *)(&copy_src0[4]);
     218           1 :               copy_dst_last0[0] = copy_src_last0[0];
     219           1 :               copy_dst_last1 = (u32 *)(&copy_dst1[4]);
     220           1 :               copy_src_last1 = (u32 *)(&copy_src1[4]);
     221           1 :               copy_dst_last1[0] = copy_src_last1[0];
     222           1 :               copy_dst_last2 = (u32 *)(&copy_dst2[4]);
     223           1 :               copy_src_last2 = (u32 *)(&copy_src2[4]);
     224           1 :               copy_dst_last2[0] = copy_src_last2[0];
     225           1 :               copy_dst_last3 = (u32 *)(&copy_dst3[4]);
     226           1 :               copy_src_last3 = (u32 *)(&copy_src3[4]);
     227           1 :               copy_dst_last3[0] = copy_src_last3[0];
     228             : 
     229             :               /* Fix the IP4 checksum and length */
     230           1 :               sum0 = ip4_0->checksum;
     231             :               new_l0 = /* old_l0 always 0, see the rewrite setup */
     232           1 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
     233           1 :               sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
     234             :                                      length /* changed member */);
     235           1 :               ip4_0->checksum = ip_csum_fold (sum0);
     236           1 :               ip4_0->length = new_l0;
     237           1 :               sum1 = ip4_1->checksum;
     238             :               new_l1 = /* old_l1 always 0, see the rewrite setup */
     239           1 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
     240           1 :               sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
     241             :                                      length /* changed member */);
     242           1 :               ip4_1->checksum = ip_csum_fold (sum1);
     243           1 :               ip4_1->length = new_l1;
     244           1 :               sum2 = ip4_2->checksum;
     245             :               new_l2 = /* old_l0 always 0, see the rewrite setup */
     246           1 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2));
     247           1 :               sum2 = ip_csum_update (sum2, old_l2, new_l2, ip4_header_t,
     248             :                                      length /* changed member */);
     249           1 :               ip4_2->checksum = ip_csum_fold (sum2);
     250           1 :               ip4_2->length = new_l2;
     251           1 :               sum3 = ip4_3->checksum;
     252             :               new_l3 = /* old_l1 always 0, see the rewrite setup */
     253           1 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3));
     254           1 :               sum3 = ip_csum_update (sum3, old_l3, new_l3, ip4_header_t,
     255             :                                      length /* changed member */);
     256           1 :               ip4_3->checksum = ip_csum_fold (sum3);
     257           1 :               ip4_3->length = new_l3;
     258             : 
     259             :               /* Fix UDP length and set source port */
     260           1 :               udp0 = (udp_header_t *)(ip4_0+1);
     261           1 :               new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
     262           1 :                                              - sizeof (*ip4_0));
     263           1 :               udp0->length = new_l0;
     264           1 :               udp0->src_port = flow_hash0;
     265           1 :               udp1 = (udp_header_t *)(ip4_1+1);
     266           1 :               new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
     267           1 :                                              - sizeof (*ip4_1));
     268           1 :               udp1->length = new_l1;
     269           1 :               udp1->src_port = flow_hash1;
     270           1 :               udp2 = (udp_header_t *)(ip4_2+1);
     271           1 :               new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
     272           1 :                                              - sizeof (*ip4_2));
     273           1 :               udp2->length = new_l2;
     274           1 :               udp2->src_port = flow_hash2;
     275           1 :               udp3 = (udp_header_t *)(ip4_3+1);
     276           1 :               new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
     277           1 :                                              - sizeof (*ip4_3));
     278           1 :               udp3->length = new_l3;
     279           1 :               udp3->src_port = flow_hash3;
     280             : 
     281             :               /* Fix GTPU length */
     282           1 :               gtpu0 = (gtpu_header_t *)(udp0+1);
     283           1 :               new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
     284             :                                              - sizeof (*ip4_0) - sizeof(*udp0)
     285           1 :                                              - GTPU_V1_HDR_LEN);
     286           1 :               gtpu0->length = new_l0;
     287           1 :               gtpu1 = (gtpu_header_t *)(udp1+1);
     288           1 :               new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
     289             :                                              - sizeof (*ip4_1) - sizeof(*udp1)
     290           1 :                                              - GTPU_V1_HDR_LEN);
     291           1 :               gtpu1->length = new_l1;
     292           1 :               gtpu2 = (gtpu_header_t *)(udp2+1);
     293           1 :               new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
     294             :                                              - sizeof (*ip4_2) - sizeof(*udp2)
     295           1 :                                              - GTPU_V1_HDR_LEN);
     296           1 :               gtpu2->length = new_l2;
     297           1 :               gtpu3 = (gtpu_header_t *)(udp3+1);
     298           1 :               new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
     299             :                                              - sizeof (*ip4_3) - sizeof(*udp3)
     300           1 :                                              - GTPU_V1_HDR_LEN);
     301           1 :               gtpu3->length = new_l3;
     302             :             }
     303             :           else /* ipv6 */
     304             :             {
     305           0 :               int bogus = 0;
     306             : 
     307           0 :               ip6_0 = vlib_buffer_get_current(b0);
     308           0 :               ip6_1 = vlib_buffer_get_current(b1);
     309           0 :               ip6_2 = vlib_buffer_get_current(b2);
     310           0 :               ip6_3 = vlib_buffer_get_current(b3);
     311             : 
     312             :               /* Copy the fixed header */
     313           0 :               copy_dst0 = (u64 *) ip6_0;
     314           0 :               copy_src0 = (u64 *) t0->rewrite;
     315           0 :               copy_dst1 = (u64 *) ip6_1;
     316           0 :               copy_src1 = (u64 *) t1->rewrite;
     317           0 :               copy_dst2 = (u64 *) ip6_2;
     318           0 :               copy_src2 = (u64 *) t2->rewrite;
     319           0 :               copy_dst3 = (u64 *) ip6_3;
     320           0 :               copy_src3 = (u64 *) t3->rewrite;
     321             :               /* Copy first 56 (ip6) octets 8-bytes at a time */
     322             : #define _(offs) copy_dst0[offs] = copy_src0[offs];
     323           0 :               foreach_fixed_header6_offset;
     324             : #undef _
     325             : #define _(offs) copy_dst1[offs] = copy_src1[offs];
     326           0 :               foreach_fixed_header6_offset;
     327             : #undef _
     328             : #define _(offs) copy_dst2[offs] = copy_src2[offs];
     329           0 :               foreach_fixed_header6_offset;
     330             : #undef _
     331             : #define _(offs) copy_dst3[offs] = copy_src3[offs];
     332           0 :               foreach_fixed_header6_offset;
     333             : #undef _
     334             :               /* Fix IP6 payload length */
     335             :               new_l0 =
     336           0 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
     337           0 :                                       - sizeof(*ip6_0));
     338           0 :               ip6_0->payload_length = new_l0;
     339             :               new_l1 =
     340           0 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
     341           0 :                                       - sizeof(*ip6_1));
     342           0 :               ip6_1->payload_length = new_l1;
     343             :               new_l2 =
     344           0 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
     345           0 :                                       - sizeof(*ip6_2));
     346           0 :               ip6_2->payload_length = new_l2;
     347             :               new_l3 =
     348           0 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
     349           0 :                                       - sizeof(*ip6_3));
     350           0 :               ip6_3->payload_length = new_l3;
     351             : 
     352             :               /* Fix UDP length  and set source port */
     353           0 :               udp0 = (udp_header_t *)(ip6_0+1);
     354           0 :               udp0->length = new_l0;
     355           0 :               udp0->src_port = flow_hash0;
     356           0 :               udp1 = (udp_header_t *)(ip6_1+1);
     357           0 :               udp1->length = new_l1;
     358           0 :               udp1->src_port = flow_hash1;
     359           0 :               udp2 = (udp_header_t *)(ip6_2+1);
     360           0 :               udp2->length = new_l2;
     361           0 :               udp2->src_port = flow_hash2;
     362           0 :               udp3 = (udp_header_t *)(ip6_3+1);
     363           0 :               udp3->length = new_l3;
     364           0 :               udp3->src_port = flow_hash3;
     365             : 
     366             :               /* Fix GTPU length */
     367           0 :               gtpu0 = (gtpu_header_t *)(udp0+1);
     368           0 :               new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
     369             :                                              - sizeof (*ip6_0) - sizeof(*udp0)
     370           0 :                                              - GTPU_V1_HDR_LEN);
     371           0 :               gtpu0->length = new_l0;
     372           0 :               gtpu1 = (gtpu_header_t *)(udp1+1);
     373           0 :               new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
     374             :                                              - sizeof (*ip6_1) - sizeof(*udp1)
     375           0 :                                              - GTPU_V1_HDR_LEN);
     376           0 :               gtpu1->length = new_l1;
     377           0 :               gtpu2 = (gtpu_header_t *)(udp2+1);
     378           0 :               new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
     379             :                                              - sizeof (*ip6_2) - sizeof(*udp2)
     380           0 :                                              - GTPU_V1_HDR_LEN);
     381           0 :               gtpu2->length = new_l2;
     382           0 :               gtpu3 = (gtpu_header_t *)(udp3+1);
     383           0 :               new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
     384             :                                              - sizeof (*ip6_3) - sizeof(*udp3)
     385           0 :                                              - GTPU_V1_HDR_LEN);
     386           0 :               gtpu3->length = new_l3;
     387             : 
     388             :               /* IPv6 UDP checksum is mandatory */
     389           0 :               udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
     390             :                                                                  ip6_0, &bogus);
     391           0 :               if (udp0->checksum == 0)
     392           0 :                 udp0->checksum = 0xffff;
     393           0 :               udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
     394             :                                                                  ip6_1, &bogus);
     395           0 :               if (udp1->checksum == 0)
     396           0 :                 udp1->checksum = 0xffff;
     397           0 :               udp2->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b2,
     398             :                                                                  ip6_2, &bogus);
     399           0 :               if (udp2->checksum == 0)
     400           0 :                 udp2->checksum = 0xffff;
     401           0 :               udp3->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b3,
     402             :                                                                  ip6_3, &bogus);
     403           0 :               if (udp3->checksum == 0)
     404           0 :                 udp3->checksum = 0xffff;
     405             : 
     406             :             }
     407             : 
     408           1 :           pkts_encapsulated += 4;
     409           1 :           len0 = vlib_buffer_length_in_chain (vm, b0);
     410           1 :           len1 = vlib_buffer_length_in_chain (vm, b1);
     411           1 :           len2 = vlib_buffer_length_in_chain (vm, b2);
     412           1 :           len3 = vlib_buffer_length_in_chain (vm, b3);
     413           1 :           stats_n_packets += 4;
     414           1 :           stats_n_bytes += len0 + len1 + len2 + len3;
     415             : 
     416             :           /* save inner packet flow_hash for load-balance node */
     417           1 :           vnet_buffer (b0)->ip.flow_hash = flow_hash0;
     418           1 :           vnet_buffer (b1)->ip.flow_hash = flow_hash1;
     419           1 :           vnet_buffer (b2)->ip.flow_hash = flow_hash2;
     420           1 :           vnet_buffer (b3)->ip.flow_hash = flow_hash3;
     421             : 
     422             :           /* Batch stats increment on the same gtpu tunnel so counter is not
     423             :              incremented per packet. Note stats are still incremented for deleted
     424             :              and admin-down tunnel where packets are dropped. It is not worthwhile
     425             :              to check for this rare case and affect normal path performance. */
     426           1 :           if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
     427             :                              (sw_if_index1 != stats_sw_if_index) ||
     428             :                              (sw_if_index2 != stats_sw_if_index) ||
     429             :                              (sw_if_index3 != stats_sw_if_index) ))
     430             :             {
     431           1 :               stats_n_packets -= 4;
     432           1 :               stats_n_bytes -= len0 + len1 + len2 + len3;
     433           1 :               if ( (sw_if_index0 == sw_if_index1 ) &&
     434           0 :                    (sw_if_index1 == sw_if_index2 ) &&
     435             :                    (sw_if_index2 == sw_if_index3 ) )
     436             :                 {
     437           0 :                   if (stats_n_packets)
     438           0 :                     vlib_increment_combined_counter
     439           0 :                       (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     440             :                        thread_index, stats_sw_if_index,
     441             :                        stats_n_packets, stats_n_bytes);
     442           0 :                   stats_sw_if_index = sw_if_index0;
     443           0 :                   stats_n_packets = 4;
     444           0 :                   stats_n_bytes = len0 + len1 + len2 + len3;
     445             :                 }
     446             :               else
     447             :                 {
     448           1 :                   vlib_increment_combined_counter
     449           1 :                       (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     450             :                        thread_index, sw_if_index0, 1, len0);
     451           1 :                   vlib_increment_combined_counter
     452           1 :                       (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     453             :                        thread_index, sw_if_index1, 1, len1);
     454           1 :                   vlib_increment_combined_counter
     455           1 :                       (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     456             :                        thread_index, sw_if_index2, 1, len2);
     457           1 :                   vlib_increment_combined_counter
     458           1 :                       (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     459             :                        thread_index, sw_if_index3, 1, len3);
     460             :                 }
     461             :             }
     462             : 
     463           1 :           if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
     464             :             {
     465             :               gtpu_encap_trace_t *tr =
     466           1 :                 vlib_add_trace (vm, node, b0, sizeof (*tr));
     467           1 :               tr->tunnel_index = t0 - gtm->tunnels;
     468           1 :               tr->tteid = t0->tteid;
     469             :            }
     470             : 
     471           1 :           if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
     472             :             {
     473             :               gtpu_encap_trace_t *tr =
     474           1 :                 vlib_add_trace (vm, node, b1, sizeof (*tr));
     475           1 :               tr->tunnel_index = t1 - gtm->tunnels;
     476           1 :               tr->tteid = t1->tteid;
     477             :             }
     478             : 
     479           1 :           if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
     480             :             {
     481             :               gtpu_encap_trace_t *tr =
     482           1 :                 vlib_add_trace (vm, node, b2, sizeof (*tr));
     483           1 :               tr->tunnel_index = t2 - gtm->tunnels;
     484           1 :               tr->tteid = t2->tteid;
     485             :            }
     486             : 
     487           1 :           if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
     488             :             {
     489             :               gtpu_encap_trace_t *tr =
     490           1 :                 vlib_add_trace (vm, node, b3, sizeof (*tr));
     491           1 :               tr->tunnel_index = t3 - gtm->tunnels;
     492           1 :               tr->tteid = t3->tteid;
     493             :             }
     494             : 
     495           1 :           vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
     496             :                                            to_next, n_left_to_next,
     497             :                                            bi0, bi1, bi2, bi3,
     498             :                                            next0, next1, next2, next3);
     499             :         }
     500             : 
     501          11 :       while (n_left_from > 0 && n_left_to_next > 0)
     502             :         {
     503             :           u32 bi0;
     504             :           vlib_buffer_t * b0;
     505             :           u32 flow_hash0;
     506             :           u32 len0;
     507             :           ip4_header_t * ip4_0;
     508             :           ip6_header_t * ip6_0;
     509             :           udp_header_t * udp0;
     510             :           gtpu_header_t * gtpu0;
     511             :           u64 * copy_src0, * copy_dst0;
     512             :           u32 * copy_src_last0, * copy_dst_last0;
     513             :           u16 new_l0;
     514             :           ip_csum_t sum0;
     515             : 
     516           8 :           bi0 = from[0];
     517           8 :           to_next[0] = bi0;
     518           8 :           from += 1;
     519           8 :           to_next += 1;
     520           8 :           n_left_from -= 1;
     521           8 :           n_left_to_next -= 1;
     522             : 
     523           8 :           b0 = vlib_get_buffer (vm, bi0);
     524             : 
     525           8 :           flow_hash0 = vnet_l2_compute_flow_hash(b0);
     526             : 
     527             :           /* Get next node index and adj index from tunnel next_dpo */
     528           8 :           sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
     529           8 :           hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
     530           8 :           t0 = &gtm->tunnels[hi0->dev_instance];
     531             :           /* Note: change to always set next0 if it may be set to drop */
     532           8 :           next0 = t0->next_dpo.dpoi_next_node;
     533           8 :           vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
     534             : 
     535             :           /* Apply the rewrite string. $$$$ vnet_rewrite? */
     536           8 :           vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
     537             : 
     538           8 :           if (is_ip4)
     539             :             {
     540           8 :               ip4_0 = vlib_buffer_get_current(b0);
     541             : 
     542             :               /* Copy the fixed header */
     543           8 :               copy_dst0 = (u64 *) ip4_0;
     544           8 :               copy_src0 = (u64 *) t0->rewrite;
     545             :               /* Copy first 32 octets 8-bytes at a time */
     546             : #define _(offs) copy_dst0[offs] = copy_src0[offs];
     547           8 :               foreach_fixed_header4_offset;
     548             : #undef _
     549             :               /* Last 4 octets. Hopefully gcc will be our friend */
     550           8 :               copy_dst_last0 = (u32 *)(&copy_dst0[4]);
     551           8 :               copy_src_last0 = (u32 *)(&copy_src0[4]);
     552           8 :               copy_dst_last0[0] = copy_src_last0[0];
     553             : 
     554             :               /* Fix the IP4 checksum and length */
     555           8 :               sum0 = ip4_0->checksum;
     556             :               new_l0 = /* old_l0 always 0, see the rewrite setup */
     557           8 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
     558           8 :               sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
     559             :                                      length /* changed member */);
     560           8 :               ip4_0->checksum = ip_csum_fold (sum0);
     561           8 :               ip4_0->length = new_l0;
     562             : 
     563             :               /* Fix UDP length and set source port */
     564           8 :               udp0 = (udp_header_t *)(ip4_0+1);
     565           8 :               new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
     566           8 :                                              - sizeof (*ip4_0));
     567           8 :               udp0->length = new_l0;
     568           8 :               udp0->src_port = flow_hash0;
     569             : 
     570             :               /* Fix GTPU length */
     571           8 :               gtpu0 = (gtpu_header_t *)(udp0+1);
     572           8 :               new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
     573             :                                              - sizeof (*ip4_0) - sizeof(*udp0)
     574           8 :                                              - GTPU_V1_HDR_LEN);
     575           8 :               gtpu0->length = new_l0;
     576             :             }
     577             : 
     578             :           else /* ip6 path */
     579             :             {
     580           0 :               int bogus = 0;
     581             : 
     582           0 :               ip6_0 = vlib_buffer_get_current(b0);
     583             :               /* Copy the fixed header */
     584           0 :               copy_dst0 = (u64 *) ip6_0;
     585           0 :               copy_src0 = (u64 *) t0->rewrite;
     586             :               /* Copy first 56 (ip6) octets 8-bytes at a time */
     587             : #define _(offs) copy_dst0[offs] = copy_src0[offs];
     588           0 :               foreach_fixed_header6_offset;
     589             : #undef _
     590             :               /* Fix IP6 payload length */
     591             :               new_l0 =
     592           0 :                 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
     593           0 :                                       - sizeof(*ip6_0));
     594           0 :               ip6_0->payload_length = new_l0;
     595             : 
     596             :               /* Fix UDP length  and set source port */
     597           0 :               udp0 = (udp_header_t *)(ip6_0+1);
     598           0 :               udp0->length = new_l0;
     599           0 :               udp0->src_port = flow_hash0;
     600             : 
     601             :               /* Fix GTPU length */
     602           0 :               gtpu0 = (gtpu_header_t *)(udp0+1);
     603           0 :               new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
     604             :                                              - sizeof (*ip4_0) - sizeof(*udp0)
     605           0 :                                              - GTPU_V1_HDR_LEN);
     606           0 :               gtpu0->length = new_l0;
     607             : 
     608             :               /* IPv6 UDP checksum is mandatory */
     609           0 :               udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
     610             :                                                                  ip6_0, &bogus);
     611           0 :               if (udp0->checksum == 0)
     612           0 :                 udp0->checksum = 0xffff;
     613             :             }
     614             : 
     615           8 :           pkts_encapsulated ++;
     616           8 :           len0 = vlib_buffer_length_in_chain (vm, b0);
     617           8 :           stats_n_packets += 1;
     618           8 :           stats_n_bytes += len0;
     619             : 
     620             :           /* save inner packet flow_hash for load-balance node */
     621           8 :           vnet_buffer (b0)->ip.flow_hash = flow_hash0;
     622             : 
     623             :           /* Batch stats increment on the same gtpu tunnel so counter is not
     624             :              incremented per packet. Note stats are still incremented for deleted
     625             :              and admin-down tunnel where packets are dropped. It is not worthwhile
     626             :              to check for this rare case and affect normal path performance. */
     627           8 :           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
     628             :             {
     629           8 :               stats_n_packets -= 1;
     630           8 :               stats_n_bytes -= len0;
     631           8 :               if (stats_n_packets)
     632           5 :                 vlib_increment_combined_counter
     633           5 :                   (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     634             :                    thread_index, stats_sw_if_index,
     635             :                    stats_n_packets, stats_n_bytes);
     636           8 :               stats_n_packets = 1;
     637           8 :               stats_n_bytes = len0;
     638           8 :               stats_sw_if_index = sw_if_index0;
     639             :             }
     640             : 
     641           8 :           if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
     642             :             {
     643             :               gtpu_encap_trace_t *tr =
     644           8 :                 vlib_add_trace (vm, node, b0, sizeof (*tr));
     645           8 :               tr->tunnel_index = t0 - gtm->tunnels;
     646           8 :               tr->tteid = t0->tteid;
     647             :             }
     648           8 :           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
     649             :                                            to_next, n_left_to_next,
     650             :                                            bi0, next0);
     651             :         }
     652             : 
     653           3 :       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     654             :     }
     655             : 
     656             :   /* Do we still need this now that tunnel tx stats is kept? */
     657           3 :   vlib_node_increment_counter (vm, node->node_index,
     658             :                                GTPU_ENCAP_ERROR_ENCAPSULATED,
     659             :                                pkts_encapsulated);
     660             : 
     661             :   /* Increment any remaining batch stats */
     662           3 :   if (stats_n_packets)
     663             :     {
     664           3 :       vlib_increment_combined_counter
     665           3 :         (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     666             :          thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
     667           3 :       node->runtime_data[0] = stats_sw_if_index;
     668             :     }
     669             : 
     670           3 :   return from_frame->n_vectors;
     671             : }
     672             : 
     673        2239 : VLIB_NODE_FN (gtpu4_encap_node) (vlib_main_t * vm,
     674             :               vlib_node_runtime_t * node,
     675             :               vlib_frame_t * from_frame)
     676             : {
     677           3 :   return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
     678             : }
     679             : 
     680        2236 : VLIB_NODE_FN (gtpu6_encap_node) (vlib_main_t * vm,
     681             :               vlib_node_runtime_t * node,
     682             :               vlib_frame_t * from_frame)
     683             : {
     684           0 :   return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
     685             : }
     686             : 
     687      129960 : VLIB_REGISTER_NODE (gtpu4_encap_node) = {
     688             :   .name = "gtpu4-encap",
     689             :   .vector_size = sizeof (u32),
     690             :   .format_trace = format_gtpu_encap_trace,
     691             :   .type = VLIB_NODE_TYPE_INTERNAL,
     692             :   .n_errors = ARRAY_LEN(gtpu_encap_error_strings),
     693             :   .error_strings = gtpu_encap_error_strings,
     694             :   .n_next_nodes = GTPU_ENCAP_N_NEXT,
     695             :   .next_nodes = {
     696             : #define _(s,n) [GTPU_ENCAP_NEXT_##s] = n,
     697             :     foreach_gtpu_encap_next
     698             : #undef _
     699             :   },
     700             : };
     701             : 
     702      129960 : VLIB_REGISTER_NODE (gtpu6_encap_node) = {
     703             :   .name = "gtpu6-encap",
     704             :   .vector_size = sizeof (u32),
     705             :   .format_trace = format_gtpu_encap_trace,
     706             :   .type = VLIB_NODE_TYPE_INTERNAL,
     707             :   .n_errors = ARRAY_LEN(gtpu_encap_error_strings),
     708             :   .error_strings = gtpu_encap_error_strings,
     709             :   .n_next_nodes = GTPU_ENCAP_N_NEXT,
     710             :   .next_nodes = {
     711             : #define _(s,n) [GTPU_ENCAP_NEXT_##s] = n,
     712             :     foreach_gtpu_encap_next
     713             : #undef _
     714             :   },
     715             : };
     716             : 

Generated by: LCOV version 1.14