LCOV - code coverage report
Current view: top level - vnet/vxlan-gpe - encap.c (source / functions) Hit Total Coverage
Test: coverage-filtered.info Lines: 1 139 0.7 %
Date: 2023-07-05 22:20:52 Functions: 2 6 33.3 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2015 Cisco and/or its affiliates.
       3             :  * Licensed under the Apache License, Version 2.0 (the "License");
       4             :  * you may not use this file except in compliance with the License.
       5             :  * You may obtain a copy of the License at:
       6             :  *
       7             :  *     http://www.apache.org/licenses/LICENSE-2.0
       8             :  *
       9             :  * Unless required by applicable law or agreed to in writing, software
      10             :  * distributed under the License is distributed on an "AS IS" BASIS,
      11             :  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      12             :  * See the License for the specific language governing permissions and
      13             :  * limitations under the License.
      14             :  */
      15             : /**
      16             :  *  @file
      17             :  *  @brief Functions for encapsulating VXLAN GPE tunnels
      18             :  *
      19             : */
      20             : #include <vppinfra/error.h>
      21             : #include <vppinfra/hash.h>
      22             : #include <vnet/vnet.h>
      23             : #include <vnet/ip/ip.h>
      24             : #include <vnet/ethernet/ethernet.h>
      25             : #include <vnet/udp/udp_inlines.h>
      26             : #include <vnet/vxlan-gpe/vxlan_gpe.h>
      27             : 
/** Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

/**
 * @brief VXLAN GPE encap error strings
 *
 * Expanded from foreach_vxlan_gpe_encap_error; the array is indexed by
 * vxlan_gpe_encap_error_t, so keep both expansions in sync via the macro.
 */
static char *vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};
      40             : 
/**
 * @brief Error/counter indices for the VXLAN GPE encap node
 *
 * One VXLAN_GPE_ENCAP_ERROR_<sym> value is generated per entry of
 * foreach_vxlan_gpe_encap_error; VXLAN_GPE_ENCAP_N_ERROR is the count.
 */
typedef enum
{
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
    VXLAN_GPE_ENCAP_N_ERROR,	/**< number of error counters */
} vxlan_gpe_encap_error_t;
      51             : 
/**
 * @brief Trace record for packets encapsulated in VXLAN GPE
 */
typedef struct
{
  u32 tunnel_index;	/**< index of the tunnel in ngm->tunnels pool */
} vxlan_gpe_encap_trace_t;
      59             : 
      60             : /**
      61             :  * @brief Trace of packets encapsulated in VXLAN GPE
      62             :  *
      63             :  * @param *s
      64             :  * @param *args
      65             :  *
      66             :  * @return *s
      67             :  *
      68             :  */
      69             : u8 *
      70           0 : format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
      71             : {
      72           0 :   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
      73           0 :   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
      74           0 :   vxlan_gpe_encap_trace_t *t = va_arg (*args, vxlan_gpe_encap_trace_t *);
      75             : 
      76           0 :   s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
      77           0 :   return s;
      78             : }
      79             : 
      80             : /**
      81             :  * @brief Instantiates UDP + VXLAN-GPE header then set next node to IP4|6 lookup
      82             :  *
      83             :  * @param *ngm
      84             :  * @param *b0
      85             :  * @param *t0 contains rewrite header
      86             :  * @param *next0 relative index of next dispatch function (next node)
      87             :  * @param is_v4 Is this IPv4? (or IPv6)
      88             :  *
      89             :  */
      90             : always_inline void
      91           0 : vxlan_gpe_encap_one_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0,
      92             :                             vxlan_gpe_tunnel_t *t0, u32 *next0,
      93             :                             ip_address_family_t af)
      94             : {
      95             :   ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
      96             :   ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
      97             : 
      98           0 :   ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af,
      99             :                     N_AF, UDP_ENCAP_FIXUP_NONE);
     100           0 :   next0[0] = t0->encap_next_node;
     101           0 : }
     102             : 
     103             : /**
     104             :  * @brief Instantiates UDP + VXLAN-GPE header then set next node to IP4|6 lookup for two packets
     105             :  *
     106             :  * @param *ngm
     107             :  * @param *b0 Packet0
     108             :  * @param *b1 Packet1
     109             :  * @param *t0 contains rewrite header for Packet0
     110             :  * @param *t1 contains rewrite header for Packet1
     111             :  * @param *next0 relative index of next dispatch function (next node) for Packet0
     112             :  * @param *next1 relative index of next dispatch function (next node) for Packet1
     113             :  * @param is_v4 Is this IPv4? (or IPv6)
     114             :  *
     115             :  */
     116             : always_inline void
     117           0 : vxlan_gpe_encap_two_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0,
     118             :                             vlib_buffer_t *b1, vxlan_gpe_tunnel_t *t0,
     119             :                             vxlan_gpe_tunnel_t *t1, u32 *next0, u32 *next1,
     120             :                             ip_address_family_t af)
     121             : {
     122             :   ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
     123             :   ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
     124             : 
     125           0 :   ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af,
     126             :                     N_AF, UDP_ENCAP_FIXUP_NONE);
     127           0 :   ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, af,
     128             :                     N_AF, UDP_ENCAP_FIXUP_NONE);
     129           0 :   next0[0] = next1[0] = t0->encap_next_node;
     130           0 : }
     131             : 
/**
 * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
 *
 * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
 * tunnels are "establish local". This means that we don't have a TX interface as yet
 * as we need to look up where the outer-header dest is. By setting the TX index in the
 * buffer metadata to the encap FIB, we can do a lookup to get the adjacency and real TX.
 *
 *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
 *
 * Per packet: resolve the tunnel from the buffer's TX sw_if_index (cached
 * across consecutive packets on the same tunnel), prepend the UDP + VXLAN-GPE
 * rewrite, then redirect VLIB_TX to the encap FIB. TX interface counters are
 * batched per tunnel; the last batch's sw_if_index is persisted in
 * node->runtime_data[0] across frames.
 *
 * @node vxlan-gpe-input
 * @param *vm
 * @param *node
 * @param *from_frame frame of buffer indices to encapsulate
 *
 * @return from_frame->n_vectors
 *
 */
static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
  vnet_main_t *vnm = ngm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  /* stats batching state carried over from the previous frame */
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;
      /* ~0 forces a tunnel lookup on the first packet of each frame chunk */
      u32 sw_if_index0 = ~0, sw_if_index1 = ~0, len0, len1;
      vnet_hw_interface_t *hi0, *hi1;
      vxlan_gpe_tunnel_t *t0 = NULL, *t1 = NULL;
      ip_address_family_t af_0 = AF_IP4, af_1 = AF_IP4;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual-buffer loop: process two packets per iteration while at least
         four remain (the extra two are prefetched). */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          u32 next0, next1;

          next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            /* Prefetch the area in front of ->data where the rewrite will
               be prepended, plus the start of the packet itself. */
            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          /* get "af_0": resolve tunnel 0 unless cached from the previous
             packet (same TX sw_if_index) */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 =
                vnet_get_sup_hw_interface (vnm,
                                           vnet_buffer (b[0])->sw_if_index
                                           [VLIB_TX]);
              t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
              af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
            }

          /* get "af_1": same caching; additionally reuse tunnel 0's lookup
             when both packets share a tunnel interface */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  hi1 = hi0;
                  t1 = t0;
                  af_1 = af_0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
                  hi1 =
                    vnet_get_sup_hw_interface (vnm,
                                               vnet_buffer (b[1])->sw_if_index
                                               [VLIB_TX]);
                  t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
                  af_1 =
                    (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
                }
            }

          if (PREDICT_TRUE (af_0 == af_1))
            {
              vxlan_gpe_encap_two_inline (ngm, b[0], b[1], t0, t1, &next0,
                                          &next1, af_0);
            }
          else
            {
              vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0);
              vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, af_1);
            }

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer (b[1])->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer (b[1])->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          len1 = vlib_buffer_length_in_chain (vm, b[1]);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
                             || (sw_if_index1 != stats_sw_if_index)))
            {
              /* Undo the optimistic increment above, flush the running
                 batch, then restart or count individually. */
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter (im->combined_sw_if_counters
                                                   +
                                                   VNET_INTERFACE_COUNTER_TX,
                                                   thread_index, sw_if_index0,
                                                   1, len0);
                  vlib_increment_combined_counter (im->combined_sw_if_counters
                                                   +
                                                   VNET_INTERFACE_COUNTER_TX,
                                                   thread_index, sw_if_index1,
                                                   1, len1);
                }
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b[1],
                                                            sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }
          b += 2;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0,
                                           next1);
        }

      /* Single-buffer loop: remaining packets, one at a time */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          /* get "af_0": tunnel lookup, cached across consecutive packets
             on the same tunnel interface */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 =
                vnet_get_sup_hw_interface (vnm,
                                           vnet_buffer (b[0])->sw_if_index
                                           [VLIB_TX]);

              t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

              af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
            }

          vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0);

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated++;

          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
           *  incremented per packet. Note stats are still incremented for deleted
           *  and admin-down tunnel where packets are dropped. It is not worthwhile
           *  to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_TX,
                                                 thread_index,
                                                 stats_sw_if_index,
                                                 stats_n_packets,
                                                 stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                            sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (im->combined_sw_if_counters +
                                       VNET_INTERFACE_COUNTER_TX,
                                       thread_index, stats_sw_if_index,
                                       stats_n_packets, stats_n_bytes);
      /* remember the active tunnel so the next frame can keep batching */
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
     406             : 
/* *INDENT-OFF* */
/** Graph node registration for the VXLAN-GPE encap dispatch function */
VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
     427             : 
     428             : 
     429             : /*
     430             :  * fd.io coding-style-patch-verification: ON
     431             :  *
     432             :  * Local Variables:
     433             :  * eval: (c-set-style "gnu")
     434             :  * End:
     435             :  */

Generated by: LCOV version 1.14