/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_ip_vtep_h
#define included_ip_vtep_h

#include <vppinfra/hash.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip46_address.h>

/**
 * @brief Tunnel endpoint key (IPv4)
 *
 * Tunnel modules maintain a set of vtep4_key_t-s to track local IP
 * addresses that have tunnels established. The bypass node consults this
 * set to decide whether a packet should bypass normal processing and go
 * directly to the tunnel protocol handler node.
 */

/* *INDENT-OFF* */
typedef CLIB_PACKED
(struct {
  union {
    struct {
      ip4_address_t addr;
      u32 fib_index;
    };
    u64 as_u64;
  };
}) vtep4_key_t;
/* *INDENT-ON* */

/**
 * @brief Tunnel endpoint key (IPv6)
 *
 * Tunnel modules maintain a set of vtep6_key_t-s to track local IP
 * addresses that have tunnels established. The bypass node consults this
 * set to decide whether a packet should bypass normal processing and go
 * directly to the tunnel protocol handler node.
 */

/* *INDENT-OFF* */
typedef CLIB_PACKED
(struct {
  ip6_address_t addr;
  u32 fib_index;
}) vtep6_key_t;
/* *INDENT-ON* */

typedef struct
{
  uword *vtep4;		/* local ip4 VTEPs keyed on their ip4 addr + fib_index */
  uword *vtep6;		/* local ip6 VTEPs keyed on their ip6 addr + fib_index */
} vtep_table_t;

/* An ip4 key packs into a u64 and is used directly as the hash key (see
 * vtep4_check), so only the ip6 set needs a mem-keyed hash up front. */
always_inline vtep_table_t
vtep_table_create ()
{
  vtep_table_t t = { };
  t.vtep6 = hash_create_mem (0, sizeof (vtep6_key_t), sizeof (uword));
  return t;
}

uword vtep_addr_ref (vtep_table_t * t, u32 fib_index, ip46_address_t * ip);
uword vtep_addr_unref (vtep_table_t * t, u32 fib_index, ip46_address_t * ip);

always_inline void
vtep4_key_init (vtep4_key_t * k4)
{
  k4->as_u64 = ~((u64) 0);
}

always_inline void
vtep6_key_init (vtep6_key_t * k6)
{
  ip6_address_set_zero (&k6->addr);
  k6->fib_index = (u32) ~ 0;
}

enum
{
  VTEP_CHECK_FAIL = 0,
  VTEP_CHECK_PASS = 1,
  VTEP_CHECK_PASS_UNCHANGED = 2
};

always_inline u8
vtep4_check (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
	     vtep4_key_t * last_k4)
{
  vtep4_key_t k4;
  k4.addr.as_u32 = ip40->dst_address.as_u32;
  k4.fib_index = vlib_buffer_get_ip4_fib_index (b0);
  if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
    return VTEP_CHECK_PASS_UNCHANGED;
  if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
    return VTEP_CHECK_FAIL;
  last_k4->as_u64 = k4.as_u64;
  return VTEP_CHECK_PASS;
}

typedef struct
{
  vtep4_key_t vtep4_cache[8];
  int idx;
} vtep4_cache_t;

#ifdef CLIB_HAVE_VEC512
/* 512-bit vector variant: in addition to last_k4, keep an 8-entry ring of
 * recently matched keys and test all of them with a single vector compare
 * before falling back to the hash lookup. */
always_inline u8
vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
		    vtep4_key_t * last_k4, vtep4_cache_t * vtep4_u512)
{
  vtep4_key_t k4;
  k4.addr.as_u32 = ip40->dst_address.as_u32;
  k4.fib_index = vlib_buffer_get_ip4_fib_index (b0);

  if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
    return VTEP_CHECK_PASS_UNCHANGED;

  u64x8 k4_u64x8 = u64x8_splat (k4.as_u64);
  u64x8 cache = u64x8_load_unaligned (vtep4_u512->vtep4_cache);
  u8 result = u64x8_is_equal_mask (cache, k4_u64x8);
  if (PREDICT_TRUE (result != 0))
    {
      last_k4->as_u64 =
	vtep4_u512->vtep4_cache[count_trailing_zeros (result)].as_u64;
      return VTEP_CHECK_PASS_UNCHANGED;
    }

  if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
    return VTEP_CHECK_FAIL;

  vtep4_u512->vtep4_cache[vtep4_u512->idx].as_u64 = k4.as_u64;
  vtep4_u512->idx = (vtep4_u512->idx + 1) & 0x7;

  last_k4->as_u64 = k4.as_u64;

  return VTEP_CHECK_PASS;
}
#endif

always_inline u8
vtep6_check (vtep_table_t * t, vlib_buffer_t * b0, ip6_header_t * ip60,
	     vtep6_key_t * last_k6)
{
  vtep6_key_t k6;
  k6.fib_index = vlib_buffer_get_ip6_fib_index (b0);
  if (PREDICT_TRUE (k6.fib_index == last_k6->fib_index
		    && ip60->dst_address.as_u64[0] == last_k6->addr.as_u64[0]
		    && ip60->dst_address.as_u64[1] ==
		    last_k6->addr.as_u64[1]))
    {
      return VTEP_CHECK_PASS_UNCHANGED;
    }
  k6.addr = ip60->dst_address;
  if (PREDICT_FALSE (!hash_get_mem (t->vtep6, &k6)))
    return VTEP_CHECK_FAIL;
  *last_k6 = k6;
  return VTEP_CHECK_PASS;
}
#endif /* included_ip_vtep_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
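
/*
 * Illustrative usage sketch; not part of vtep.h itself.  A minimal view,
 * under stated assumptions, of how a tunnel module might use the helpers
 * above: create the table once, reference-count a local endpoint when a
 * tunnel is added, and run the per-packet check in a bypass node with the
 * one-entry last_k4 cache.  Identifiers not declared in vtep.h
 * (my_tunnel_main_t, my_tunnel_*, ...) are assumptions for this example.
 */

#include <vlib/vlib.h>
#include <vnet/ip/vtep.h>

typedef struct
{
  vtep_table_t vtep_table;	/* set of local tunnel endpoints */
} my_tunnel_main_t;

static my_tunnel_main_t my_tunnel_main;

/* One-time module initialization (would normally be a VLIB init function). */
static void
my_tunnel_init (void)
{
  my_tunnel_main.vtep_table = vtep_table_create ();
}

/* Tunnel add path: remember the tunnel's local (source) address so the
 * bypass node recognizes packets addressed to it.  vtep_addr_ref () is
 * reference counted, so several tunnels may share one endpoint address. */
static void
my_tunnel_endpoint_add (u32 fib_index, ip46_address_t * src)
{
  vtep_addr_ref (&my_tunnel_main.vtep_table, fib_index, src);
}

/* Tunnel delete path: drop the reference taken above. */
static void
my_tunnel_endpoint_del (u32 fib_index, ip46_address_t * src)
{
  vtep_addr_unref (&my_tunnel_main.vtep_table, fib_index, src);
}

/* Per-packet test as an ip4 bypass node would do it.  last_k4 lives on the
 * caller's stack, is initialized once per frame with vtep4_key_init () and
 * caches the previous match, so consecutive packets to the same endpoint
 * return VTEP_CHECK_PASS_UNCHANGED without touching the hash table. */
always_inline int
my_tunnel_packet_is_for_local_vtep4 (vlib_buffer_t * b0, ip4_header_t * ip40,
				     vtep4_key_t * last_k4)
{
  return vtep4_check (&my_tunnel_main.vtep_table, b0, ip40, last_k4)
    != VTEP_CHECK_FAIL;
}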