/*
 *------------------------------------------------------------------
 * Copyright (c) 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef IPSEC_OUTPUT_H
#define IPSEC_OUTPUT_H

#include <vppinfra/types.h>
#include <vnet/ipsec/ipsec_spd.h>
#include <vnet/ipsec/ipsec_spd_fp_lookup.h>

always_inline void
ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                     u16 lp, u16 rp, u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know whether to
     increment the flow cache counter. Since the flow cache counter is
     reset on any policy add/remove, but the hash table values are not,
     we also need to check whether the entry being overwritten is stale.
     If we are overwriting a stale entry, we still want to increment the
     flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
                    sizeof (ip4_5tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}
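
/*
 * Illustration of the cache entry layout used above (derived from the code,
 * for clarity): the 64-bit value packs the matched policy pool index into
 * the upper 32 bits and the epoch count at insertion time into the lower
 * 32 bits, so a later lookup can recover both with a shift and a mask and
 * detect staleness by comparing the stored epoch with im->epoch_count.
 *
 *   u64 value  = (((u64) pol_id) << 32) | ((u64) im->epoch_count);
 *   u32 pol_id = (u32) (value >> 32);
 *   u32 epoch  = (u32) (value & 0xFFFFFFFF);
 *   u8 stale   = (epoch != im->epoch_count);
 */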

always_inline void
ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
                                       ipsec4_spd_5tuple_t *ip4_5tuple,
                                       u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;

  ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know whether to
     increment the flow cache counter. Since the flow cache counter is
     reset on any policy add/remove, but the hash table values are not,
     we also need to check whether the entry being overwritten is stale.
     If we are overwriting a stale entry, we still want to increment the
     flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
                    sizeof (ip4_5tuple->kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

always_inline void
ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
                                u16 lp, u16 rp, u8 pr)
{
  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
  tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
  tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      tuple->lport = 0;
      tuple->rport = 0;
    }
  else
    {
      tuple->lport = lp;
      tuple->rport = rp;
    }

  tuple->protocol = pr;
  tuple->is_ipv6 = 0;
}
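
/*
 * Note on the port handling above: only TCP, UDP and SCTP carry ports in
 * this lookup, so for any other protocol the ports are forced to zero.
 * Every packet of a port-less protocol between the same address pair then
 * maps to the same 5-tuple. Illustrative call (values are hypothetical):
 *
 *   ipsec_fp_5tuple_t t;
 *   ipsec_fp_5tuple_from_ip4_range (&t, la, ra, 0x1234, 0x5678,
 *                                   IP_PROTOCOL_ICMP);
 *   // t.lport == 0 && t.rport == 0, regardless of the ports passed in
 */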

always_inline void
ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
                                  ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
{
  u32 n_left = n;
  ipsec_fp_5tuple_t *tuple = tuples;

  while (n_left)
    {
      clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
      tuple->laddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
      tuple->raddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
      if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
        {
          tuple->lport = 0;
          tuple->rport = 0;
        }
      else
        {
          tuple->lport = ip4_5tuple->port[0];
          tuple->rport = ip4_5tuple->port[1];
        }
      tuple->protocol = ip4_5tuple->proto;
      tuple->is_ipv6 = 0;
      n_left--;
      tuple++;
      /* advance to the next input tuple as well, so each of the n output
         tuples is built from its own input entry */
      ip4_5tuple++;
    }
}

always_inline int
ipsec_output_policy_match_n (ipsec_spd_t *spd,
                             ipsec4_spd_5tuple_t *ip4_5tuples,
                             ipsec_policy_t **policies, u32 n,
                             u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t **pp = policies;
  u32 n_left = n;
  ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
  u32 policy_ids[n], *policy_id = policy_ids;
  ipsec_fp_5tuple_t tuples[n];
  u32 *i;
  u32 counter = 0;

  if (!spd)
    return 0;

  clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));

  if (im->fp_spd_ipv4_out_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx))
    {
      ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
      counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
                                              policies, policy_ids, n);
    }

  while (n_left)
    {
      if (*pp != 0)
        goto next;

      vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
        {
          p = pool_elt_at_index (im->policies, *i);
          if (PREDICT_FALSE (p->protocol &&
                             (p->protocol != ip4_5tuple->proto)))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
            continue;

          if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
            {
              ip4_5tuple->port[0] = 0;
              ip4_5tuple->port[1] = 0;
              goto add_policy;
            }

          if (ip4_5tuple->port[0] < p->lport.start)
            continue;

          if (ip4_5tuple->port[0] > p->lport.stop)
            continue;

          if (ip4_5tuple->port[1] < p->rport.start)
            continue;

          if (ip4_5tuple->port[1] > p->rport.stop)
            continue;

        add_policy:
          *pp = p;
          *policy_id = *i;
          counter++;
          break;
        }

    next:
      n_left--;
      pp++;
      ip4_5tuple++;
      policy_id++;
    }

  if (flow_cache_enabled)
    {
      n_left = n;
      policy_id = policy_ids;
      ip4_5tuple = ip4_5tuples;
      pp = policies;

      while (n_left)
        {
          if (*pp != NULL)
            {
              /* Add an entry to the flow cache */
              ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
                                                     *policy_id);
            }

          n_left--;
          policy_id++;
          ip4_5tuple++;
          pp++;
        }
    }

  return counter;
}
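
/*
 * Usage sketch for the batched match above (illustrative only; the batch
 * size and field values are hypothetical). The caller fills one
 * ipsec4_spd_5tuple_t per outbound packet and receives one matched policy
 * pointer, or NULL, per slot; the return value is the number of matches.
 *
 *   ipsec4_spd_5tuple_t t5[2];             // built from two outbound packets
 *   ipsec_policy_t *pols[2];
 *   u8 flow_cache_on = 1;                  // caller's flow cache setting
 *   int n_matched = ipsec_output_policy_match_n (spd, t5, pols, 2,
 *                                                flow_cache_on);
 *   // pols[i] == NULL means packet i matched no outbound policy
 */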

always_inline ipsec_policy_t *
ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                      u16 lp, u16 rp)
{
  ipsec_policy_t *p = NULL;
  ipsec4_hash_kv_16_8_t kv_result;
  u64 hash;

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      lp = 0;
      rp = 0;
    }
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  kv_result = im->ipsec4_out_spd_hash_tbl[hash];
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
                                    (u64 *) &kv_result))
    {
      if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
        {
          /* Get the policy based on the index */
          p =
            pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
        }
    }

  return p;
}
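
/*
 * Caller-side sketch (illustrative, not the actual output node code): a
 * typical fast path probes the flow cache first and only falls back to the
 * full SPD walk on a miss. Note the byte-order convention implied above:
 * the cache is keyed on addresses/ports as stored by
 * ipsec4_out_spd_add_flow_cache_entry (which converts with
 * clib_host_to_net_*), while ipsec_output_policy_match below expects
 * host-order values.
 *
 *   ipsec_policy_t *p = NULL;
 *   if (flow_cache_enabled)
 *     p = ipsec4_out_spd_find_flow_cache_entry (im, proto, src_net, dst_net,
 *                                               sport_net, dport_net);
 *   if (p == NULL)
 *     p = ipsec_output_policy_match (spd, proto,
 *                                    clib_net_to_host_u32 (src_net),
 *                                    clib_net_to_host_u32 (dst_net),
 *                                    clib_net_to_host_u16 (sport_net),
 *                                    clib_net_to_host_u16 (dport_net),
 *                                    flow_cache_enabled);
 */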

always_inline ipsec_policy_t *
ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
                           u16 rp, u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  if (im->fp_spd_ipv4_out_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx))
    {
      ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
      ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
                                   fp_policy_ids, 1);
      p = policies[0];
      i = fp_policy_ids;
      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
        }
      goto add_flow_cache;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
        continue;

      if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
        continue;

      if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
        continue;

      if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
          goto add_flow_cache;
        }

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

    add_flow_cache:
      if (flow_cache_enabled)
        {
          /* Add an entry to the flow cache */
          ipsec4_out_spd_add_flow_cache_entry (
            im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
            clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
        }

      return p;
    }
  return 0;
}

always_inline uword
ip6_addr_match_range (ip6_address_t *a, ip6_address_t *la, ip6_address_t *ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;
  return 0;
}
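
/*
 * The byte-wise memcmp above is a valid numeric range check because IPv6
 * addresses are stored in network (big-endian) byte order, so comparing the
 * 16 bytes left to right compares the addresses most-significant byte
 * first. Illustrative cases (addresses written as text for clarity):
 *
 *   lower = 2001:db8::1, upper = 2001:db8::ff
 *   a = 2001:db8::10  ->  ip6_addr_match_range (&a, &lower, &upper) == 1
 *   a = 2001:db9::1   ->  ip6_addr_match_range (&a, &lower, &upper) == 0
 */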

always_inline void
ipsec_fp_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *la,
                                ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  clib_memcpy (&tuple->ip6_laddr, la, sizeof (ip6_address_t));
  clib_memcpy (&tuple->ip6_raddr, ra, sizeof (ip6_address_t));

  tuple->lport = lp;
  tuple->rport = rp;
  tuple->protocol = pr;
  tuple->is_ipv6 = 1;
}

always_inline ipsec_policy_t *
ipsec6_output_policy_match (ipsec_spd_t *spd, ip6_address_t *la,
                            ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  if (im->fp_spd_ipv6_out_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx))
    {
      ipsec_fp_5tuple_from_ip6_range (&tuples[0], la, ra, lp, rp, pr);
      ipsec_fp_out_policy_match_n (&spd->fp_spd, 1, tuples, policies,
                                   fp_policy_ids, 1);
      p = policies[0];
      i = fp_policy_ids;
      return p;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
        continue;

      if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        return p;

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

      return p;
    }

  return 0;
}

#endif /* !IPSEC_OUTPUT_H */
