Line data Source code
1 : /*
2 : *------------------------------------------------------------------
3 : * Copyright (c) 2017 Cisco and/or its affiliates.
4 : * Licensed under the Apache License, Version 2.0 (the "License");
5 : * you may not use this file except in compliance with the License.
6 : * You may obtain a copy of the License at:
7 : *
8 : * http://www.apache.org/licenses/LICENSE-2.0
9 : *
10 : * Unless required by applicable law or agreed to in writing, software
11 : * distributed under the License is distributed on an "AS IS" BASIS,
12 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 : * See the License for the specific language governing permissions and
14 : * limitations under the License.
15 : *------------------------------------------------------------------
16 : */
17 :
18 : #include <stddef.h>
19 : #include <netinet/in.h>
20 :
21 : #include <vlibapi/api.h>
22 : #include <vlibmemory/api.h>
23 :
24 : #include <vlib/vlib.h>
25 : #include <vnet/vnet.h>
26 : #include <vppinfra/error.h>
27 : #include <vnet/plugin/plugin.h>
28 : #include <acl/acl.h>
29 : #include <vppinfra/bihash_48_8.h>
30 :
31 : #include "hash_lookup.h"
32 : #include "hash_lookup_private.h"
33 :
34 :
35 97651 : always_inline applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, u32 lc_index)
36 : {
37 97651 : applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
38 :
39 : /*is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
40 : : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
41 : */
42 97651 : return applied_hash_aces;
43 : }
44 :
45 :
46 : static void
47 82930 : hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
48 : {
49 : DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
50 : kv->key[0], kv->key[1], kv->key[2],
51 : kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
52 82930 : BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
53 82930 : }
54 :
55 : /*
56 : * TupleMerge
57 : *
58 : * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
59 : * based on the TupleMerge [1] simulator kindly made available
60 : * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
61 : * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
62 : * refactoring by Andrew Yourtchenko.
63 : *
64 : * [1] James Daly, Eric Torng "TupleMerge: Building Online Packet Classifiers
65 : * by Omitting Bits", In Proc. IEEE ICCCN 2017, pp. 1-10
66 : *
67 : */
68 :
69 : static int
70 7642 : count_bits (u64 word)
71 : {
72 7642 : int counter = 0;
73 364042 : while (word)
74 : {
75 356400 : counter += word & 1;
76 356400 : word >>= 1;
77 : }
78 7642 : return counter;
79 : }
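: /* Editorial note, not part of the original file: count_bits() above is a
:  * simple bit-serial loop. A minimal alternative sketch, assuming a
:  * GCC/clang-compatible compiler, kept out of the build on purpose: */
: #if 0
: static int
: count_bits_builtin (u64 word)
: {
:   /* __builtin_popcountll() returns the number of set bits in the word */
:   return __builtin_popcountll (word);
: }
: #endif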
80 :
81 : /* check if mask2 can be contained by mask1: every bit set in mask1 must also be set in mask2 */
82 : static u8
83 74516 : first_mask_contains_second_mask(int is_ip6, fa_5tuple_t * mask1, fa_5tuple_t * mask2)
84 : {
85 : int i;
86 74516 : if (is_ip6)
87 : {
88 110565 : for (i = 0; i < 2; i++)
89 : {
90 73712 : if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
91 73712 : mask1->ip6_addr[0].as_u64[i])
92 6 : return 0;
93 73706 : if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
94 73706 : mask1->ip6_addr[1].as_u64[i])
95 0 : return 0;
96 : }
97 : }
98 : else
99 : {
100 : /* check the pads; both masks must have them set to 0 */
101 37657 : u32 padcheck = 0;
102 : int i;
103 263599 : for (i=0; i<6; i++) {
104 225942 : padcheck |= mask1->l3_zero_pad[i];
105 225942 : padcheck |= mask2->l3_zero_pad[i];
106 : }
107 37657 : if (padcheck != 0)
108 0 : return 0;
109 37657 : if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
110 37657 : mask1->ip4_addr[0].as_u32)
111 6 : return 0;
112 37651 : if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
113 37651 : mask1->ip4_addr[1].as_u32)
114 0 : return 0;
115 : }
116 :
117 : /* take care of the case when the ports are not an exact match */
118 74504 : if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
119 785 : return 0;
120 :
121 73719 : if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
122 0 : return 0;
123 :
124 73719 : return 1;
125 : }
126 :
127 :
128 :
129 : /*
130 : * TupleMerge:
131 : *
132 : * Consider the situation when we have to create a new table
133 : * T for a given rule R. This occurs for the first rule inserted and
134 : * for later rules if it is incompatible with all existing tables.
135 : * In this event, we need to determine mT for a new table.
136 : * Setting mT = mR is not a good strategy; if another similar,
137 : * but slightly less specific, rule appears we will be unable to
138 : * add it to T and will thus have to create another new table. We
139 : * thus consider two factors: is the rule more strongly aligned
140 : * with source or destination addresses (usually the two most
141 : * important fields) and how much slack needs to be given to
142 : * allow for other rules. If the source and destination addresses
143 : * are close together (within 4 bits for our experiments), we use
144 : * both of them. Otherwise, we drop the smaller (less specific)
145 : * address and its associated port field from consideration; R is
146 : * predominantly aligned with one of the two fields and should
147 : * be grouped with other similar rules. This is similar to TSS
148 : * dropping port fields, but since it is based on observable rule
149 : * characteristics it is more likely to keep important fields and
150 : * discard less useful ones.
151 : * We then look at the absolute lengths of the addresses. If
152 : * the address is long, we are more likely to try to add shorter
153 : * lengths and likewise the reverse. We thus remove a few bits
154 : * from both address fields with more bits removed from longer
155 : * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
156 : * than 24, 2 for more than 16, and so on (so 8 and fewer bits
157 : * don’t have any removed). We only do this for prefix fields like
158 : * addresses; both range fields (like ports) and exact match fields
159 : * (like protocol) should remain as they are.
160 : */
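: /* Editorial worked example, not part of the original file, tracing the
:  * relax logic implemented below: with the first-level relax (relax2 == 0),
:  * an exact-match IPv4 address mask of /32 (0xffffffff) is widened by 6 bits
:  * to /26; for IPv6, a /128 mask is widened to /64 and a /64 mask to /56.
:  * Note that the concrete shift amounts used in relax_ip4_addr() below
:  * differ from the 4/3/2-bit counts quoted from the paper above. */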
161 :
162 :
163 : static u32
164 618 : shift_ip4_if(u32 mask, u32 thresh, int numshifts, u32 else_val)
165 : {
166 618 : if (mask > thresh)
167 0 : return clib_host_to_net_u32((clib_net_to_host_u32(mask) << numshifts) & 0xFFFFFFFF);
168 : else
169 618 : return else_val;
170 : }
171 :
172 : static void
173 3602 : relax_ip4_addr(ip4_address_t *ip4_mask, int relax2) {
174 3602 : int shifts_per_relax[2][4] = { { 6, 5, 4, 2 }, { 3, 2, 1, 1 } };
175 :
176 3602 : int *shifts = shifts_per_relax[relax2];
177 3602 : if(ip4_mask->as_u32 == 0xffffffff)
178 3396 : ip4_mask->as_u32 = clib_host_to_net_u32((clib_net_to_host_u32(ip4_mask->as_u32) << shifts[0])&0xFFFFFFFF);
179 : else
180 206 : ip4_mask->as_u32 = shift_ip4_if(ip4_mask->as_u32, 0xffffff00, shifts[1],
181 206 : shift_ip4_if(ip4_mask->as_u32, 0xffff0000, shifts[2],
182 206 : shift_ip4_if(ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
183 3602 : }
184 :
185 : static void
186 2020 : relax_ip6_addr(ip6_address_t *ip6_mask, int relax2) {
187 : /*
188 : * This "better than nothing" relax logic is based on heuristics
189 : * from IPv6 knowledge, and may not be optimal.
190 : * Some further tuning may be needed in the future.
191 : */
192 2020 : if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL) {
193 1936 : if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL) {
194 : /* relax a /128 down to /64 - likely to have more hosts */
195 1934 : ip6_mask->as_u64[1] = 0;
196 2 : } else if (ip6_mask->as_u64[1] == 0) {
197 : /* relax a /64 down to /56 - likely to have more subnets */
198 2 : ip6_mask->as_u64[0] = clib_host_to_net_u64(0xffffffffffffff00ULL);
199 : }
200 : }
201 2020 : }
202 :
203 : static void
204 2811 : relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
205 2811 : fa_5tuple_t save_mask = *mask;
206 :
207 2811 : int counter_s = 0, counter_d = 0;
208 2811 : if (is_ip6) {
209 : int i;
210 3030 : for(i=0; i<2; i++){
211 2020 : counter_s += count_bits(mask->ip6_addr[0].as_u64[i]);
212 2020 : counter_d += count_bits(mask->ip6_addr[1].as_u64[i]);
213 : }
214 : } else {
215 1801 : counter_s += count_bits(mask->ip4_addr[0].as_u32);
216 1801 : counter_d += count_bits(mask->ip4_addr[1].as_u32);
217 : }
218 :
219 : /*
220 : * is the rule more strongly aligned with source or destination addresses
221 : * (usually the two most important fields) and how much slack needs to be
222 : * given to allow for other rules. If the source and destination addresses
223 : * are close together (within 4 bits for our experiments), we use both of them.
224 : * Otherwise, we drop the smaller (less specific) address and its associated
225 : * port field from consideration
226 : */
227 2811 : const int deltaThreshold = 4;
228 : /* const int deltaThreshold = 8; if IPV6? */
229 2811 : int delta = counter_s - counter_d;
230 2811 : if (-delta > deltaThreshold) {
231 0 : if (is_ip6)
232 0 : mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
233 : else
234 0 : mask->ip4_addr[0].as_u32 = 0;
235 0 : mask->l4.port[0] = 0;
236 2811 : } else if (delta > deltaThreshold) {
237 0 : if (is_ip6)
238 0 : mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
239 : else
240 0 : mask->ip4_addr[1].as_u32 = 0;
241 0 : mask->l4.port[1] = 0;
242 : }
243 :
244 2811 : if (is_ip6) {
245 1010 : relax_ip6_addr(&mask->ip6_addr[0], relax2);
246 1010 : relax_ip6_addr(&mask->ip6_addr[1], relax2);
247 : } else {
248 1801 : relax_ip4_addr(&mask->ip4_addr[0], relax2);
249 1801 : relax_ip4_addr(&mask->ip4_addr[1], relax2);
250 : }
251 2811 : mask->pkt.is_nonfirst_fragment = 0;
252 2811 : mask->pkt.l4_valid = 0;
253 2811 : if(!first_mask_contains_second_mask(is_ip6, mask, &save_mask)){
254 : DBG( "TM-relaxing-ERROR");
255 0 : *mask = save_mask;
256 : }
257 : DBG( "TM-relaxing-end");
258 2811 : }
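: /* Editorial example, not part of the original file: for a rule with a /32
:  * source mask and a /8 destination mask, delta = 32 - 8 = 24, which exceeds
:  * deltaThreshold (4), so relax_tuple() above zeroes the destination address
:  * mask and its port mask before the per-address relaxation is applied. */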
259 :
260 : static u32
261 19335 : find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
262 : {
263 : ace_mask_type_entry_t *mte;
264 : /* *INDENT-OFF* */
265 28811 : pool_foreach (mte, am->ace_mask_type_pool)
266 : {
267 28534 : if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
268 19058 : return (mte - am->ace_mask_type_pool);
269 : }
270 : /* *INDENT-ON* */
271 277 : return ~0;
272 : }
273 :
274 : static u32
275 19335 : assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
276 : {
277 19335 : u32 mask_type_index = find_mask_type_index(am, mask);
278 : ace_mask_type_entry_t *mte;
279 19335 : if(~0 == mask_type_index) {
280 277 : pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
281 277 : mask_type_index = mte - am->ace_mask_type_pool;
282 277 : clib_memcpy_fast(&mte->mask, mask, sizeof(mte->mask));
283 277 : mte->refcount = 0;
284 :
285 : /*
286 : * We can use only 16 bits, since the match contains only a u16 field.
287 : * Realistically, once you go to 64K of mask types, it is a huge
288 : * problem anyway, so we might as well stop half way.
289 : */
290 277 : ASSERT(mask_type_index < 32768);
291 : }
292 19335 : mte = am->ace_mask_type_pool + mask_type_index;
293 19335 : mte->refcount++;
294 : DBG0("ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
295 19335 : return mask_type_index;
296 : }
297 :
298 : static void
299 70908 : lock_mask_type_index(acl_main_t *am, u32 mask_type_index)
300 : {
301 : DBG0("LOCK MTE index %d", mask_type_index);
302 70908 : ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
303 70908 : mte->refcount++;
304 : DBG0("LOCK MTE index %d new refcount %d", mask_type_index, mte->refcount);
305 70908 : }
306 :
307 :
308 : static void
309 90243 : release_mask_type_index(acl_main_t *am, u32 mask_type_index)
310 : {
311 : DBG0("RELEAS MTE index %d", mask_type_index);
312 90243 : ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
313 90243 : mte->refcount--;
314 : DBG0("RELEAS MTE index %d new refcount %d", mask_type_index, mte->refcount);
315 90243 : if (mte->refcount == 0) {
316 : /* we are not using this entry anymore */
317 277 : clib_memset(mte, 0xae, sizeof(*mte));
318 277 : pool_put(am->ace_mask_type_pool, mte);
319 : }
320 90243 : }
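: /* Editorial sketch, not part of the original file: the refcount lifecycle
:  * of a mask type entry, assuming a mask not seen before. Kept out of the
:  * build on purpose: */
: #if 0
: static void
: mask_type_refcount_example (acl_main_t * am, fa_5tuple_t * mask)
: {
:   u32 mti = assign_mask_type_index (am, mask); /* new entry, refcount = 1 */
:   lock_mask_type_index (am, mti);              /* refcount = 2 */
:   release_mask_type_index (am, mti);           /* refcount = 1 */
:   release_mask_type_index (am, mti);           /* refcount = 0, entry returns to the pool */
: }
: #endif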
321 :
322 :
323 : static u32
324 73719 : tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
325 : {
326 73719 : u32 mask_type_index = ~0;
327 73719 : u32 for_mask_type_index = ~0;
328 73719 : ace_mask_type_entry_t *mte = 0;
329 : int order_index;
330 : /* look for an existing mask comparable with the one given as input */
331 :
332 73719 : hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
333 : hash_applied_mask_info_t *minfo;
334 :
335 73719 : if (vec_len(*hash_applied_mask_info_vec) > 0) {
336 72502 : for(order_index = vec_len((*hash_applied_mask_info_vec)) -1; order_index >= 0; order_index--) {
337 71705 : minfo = vec_elt_at_index((*hash_applied_mask_info_vec), order_index);
338 71705 : for_mask_type_index = minfo->mask_type_index;
339 71705 : mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
340 71705 : if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
341 70908 : mask_type_index = (mte - am->ace_mask_type_pool);
342 70908 : lock_mask_type_index(am, mask_type_index);
343 70908 : break;
344 : }
345 : }
346 : }
347 :
348 73719 : if(~0 == mask_type_index) {
349 : /* if no matching mask is found, use a relaxed version of the original one, so that it can also accommodate future ace entries */
350 : DBG( "TM-assigning mask type index-new one");
351 2811 : fa_5tuple_t relaxed_mask = *mask;
352 2811 : relax_tuple(&relaxed_mask, is_ip6, 0);
353 2811 : mask_type_index = assign_mask_type_index(am, &relaxed_mask);
354 :
355 2811 : hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
356 :
357 2811 : int spot = vec_len((*hash_applied_mask_info_vec));
358 2811 : vec_validate((*hash_applied_mask_info_vec), spot);
359 2811 : minfo = vec_elt_at_index((*hash_applied_mask_info_vec), spot);
360 2811 : minfo->mask_type_index = mask_type_index;
361 2811 : minfo->num_entries = 0;
362 2811 : minfo->max_collisions = 0;
363 2811 : minfo->first_rule_index = ~0;
364 :
365 : /*
366 : * We can use only 16 bits, since the match contains only a u16 field.
367 : * Realistically, once you go to 64K of mask types, it is a huge
368 : * problem anyway, so we might as well stop half way.
369 : */
370 2811 : ASSERT(mask_type_index < 32768);
371 : }
372 73719 : mte = am->ace_mask_type_pool + mask_type_index;
373 : DBG0("TM-ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
374 73719 : return mask_type_index;
375 : }
376 :
377 :
378 : static void
379 129975 : fill_applied_hash_ace_kv(acl_main_t *am,
380 : applied_hash_ace_entry_t **applied_hash_aces,
381 : u32 lc_index,
382 : u32 new_index, clib_bihash_kv_48_8_t *kv)
383 : {
384 129975 : fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
385 129975 : hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
386 129975 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
387 129975 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
388 :
389 : /* apply the mask to ace key */
390 129975 : hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
391 129975 : ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);
392 :
393 129975 : u64 *pmatch = (u64 *) &ace_info->match;
394 129975 : u64 *pmask = (u64 *)&mte->mask;
395 129975 : u64 *pkey = (u64 *)kv->key;
396 :
397 129975 : *pkey++ = *pmatch++ & *pmask++;
398 129975 : *pkey++ = *pmatch++ & *pmask++;
399 129975 : *pkey++ = *pmatch++ & *pmask++;
400 129975 : *pkey++ = *pmatch++ & *pmask++;
401 129975 : *pkey++ = *pmatch++ & *pmask++;
402 129975 : *pkey++ = *pmatch++ & *pmask++;
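: /* editorial note: the six u64 copies above cover the full 48-byte bihash
:    key (clib_bihash_kv_48_8_t), i.e. the whole fa_5tuple_t */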
403 :
404 129975 : kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
405 129975 : kv_key->pkt.lc_index = lc_index;
406 129975 : kv_val->as_u64 = 0;
407 129975 : kv_val->applied_entry_index = new_index;
408 129975 : }
409 :
410 : static void
411 56256 : add_del_hashtable_entry(acl_main_t *am,
412 : u32 lc_index,
413 : applied_hash_ace_entry_t **applied_hash_aces,
414 : u32 index, int is_add)
415 : {
416 : clib_bihash_kv_48_8_t kv;
417 :
418 56256 : fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
419 56256 : hashtable_add_del(am, &kv, is_add);
420 56256 : }
421 :
422 :
423 : static void
424 23932 : remake_hash_applied_mask_info_vec (acl_main_t * am,
425 : applied_hash_ace_entry_t **
426 : applied_hash_aces, u32 lc_index)
427 : {
428 : DBG0("remake applied hash mask info lc_index %d", lc_index);
429 23932 : hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
430 23932 : vec_new (hash_applied_mask_info_t, 0);
431 :
432 : hash_applied_mask_info_t *minfo;
433 : int i;
434 392243 : for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
435 : {
436 368311 : applied_hash_ace_entry_t *pae =
437 368311 : vec_elt_at_index ((*applied_hash_aces), i);
438 :
439 : /* check if mask_type_index is already there */
440 368311 : u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
441 : int search;
442 464278 : for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
443 95967 : search++)
444 : {
445 436441 : minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
446 436441 : if (minfo->mask_type_index == pae->mask_type_index)
447 340474 : break;
448 : }
449 :
450 368311 : vec_validate ((new_hash_applied_mask_info_vec), search);
451 368311 : minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
452 368311 : if (search == new_pointer)
453 : {
454 : DBG0("remaking index %d", search);
455 27837 : minfo->mask_type_index = pae->mask_type_index;
456 27837 : minfo->num_entries = 0;
457 27837 : minfo->max_collisions = 0;
458 27837 : minfo->first_rule_index = ~0;
459 : }
460 :
461 368311 : minfo->num_entries = minfo->num_entries + 1;
462 :
463 368311 : if (vec_len (pae->colliding_rules) > minfo->max_collisions)
464 37662 : minfo->max_collisions = vec_len (pae->colliding_rules);
465 :
466 368311 : if (minfo->first_rule_index > i)
467 27837 : minfo->first_rule_index = i;
468 : }
469 :
470 23932 : hash_applied_mask_info_t **hash_applied_mask_info_vec =
471 23932 : vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
472 :
473 23932 : vec_free ((*hash_applied_mask_info_vec));
474 23932 : (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
475 23932 : }
476 :
477 : static void
478 73719 : vec_del_collision_rule (collision_match_rule_t ** pvec,
479 : u32 applied_entry_index)
480 : {
481 73719 : u32 i = 0;
482 73719 : u32 deleted = 0;
483 218273 : while (i < _vec_len ((*pvec)))
484 : {
485 144554 : collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
486 144554 : if (cr->applied_entry_index == applied_entry_index)
487 : {
488 : /* vec_del1 ((*pvec), i) would be more efficient but would reorder the elements. */
489 73719 : vec_delete((*pvec), 1, i);
490 73719 : deleted++;
491 : DBG0("vec_del_collision_rule deleting one at index %d", i);
492 : }
493 : else
494 : {
495 70835 : i++;
496 : }
497 : }
498 73719 : ASSERT(deleted > 0);
499 73719 : }
500 :
501 : static void
502 : acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae);
503 :
504 : static void
505 73719 : del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
506 : u32 head_index, u32 applied_entry_index)
507 : {
508 : DBG0("DEL COLLIDING RULE: head_index %d applied index %d", head_index, applied_entry_index);
509 :
510 :
511 73719 : applied_hash_ace_entry_t *head_pae =
512 73719 : vec_elt_at_index ((*applied_hash_aces), head_index);
513 : if (ACL_HASH_LOOKUP_DEBUG > 0)
514 : acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
515 73719 : vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
516 73719 : if (vec_len(head_pae->colliding_rules) == 0) {
517 26674 : vec_free(head_pae->colliding_rules);
518 : }
519 : if (ACL_HASH_LOOKUP_DEBUG > 0)
520 : acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
521 73719 : }
522 :
523 : static void
524 73719 : add_colliding_rule (acl_main_t * am,
525 : applied_hash_ace_entry_t ** applied_hash_aces,
526 : u32 head_index, u32 applied_entry_index)
527 : {
528 73719 : applied_hash_ace_entry_t *head_pae =
529 73719 : vec_elt_at_index ((*applied_hash_aces), head_index);
530 73719 : applied_hash_ace_entry_t *pae =
531 73719 : vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
532 : DBG0("ADD COLLIDING RULE: head_index %d applied index %d", head_index, applied_entry_index);
533 : if (ACL_HASH_LOOKUP_DEBUG > 0)
534 : acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
535 :
536 : collision_match_rule_t cr;
537 :
538 73719 : cr.acl_index = pae->acl_index;
539 73719 : cr.ace_index = pae->ace_index;
540 73719 : cr.acl_position = pae->acl_position;
541 73719 : cr.applied_entry_index = applied_entry_index;
542 73719 : cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
543 73719 : pae->collision_head_ae_index = head_index;
544 73719 : vec_add1 (head_pae->colliding_rules, cr);
545 : if (ACL_HASH_LOOKUP_DEBUG > 0)
546 : acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
547 73719 : }
548 :
549 : static u32
550 73719 : activate_applied_ace_hash_entry(acl_main_t *am,
551 : u32 lc_index,
552 : applied_hash_ace_entry_t **applied_hash_aces,
553 : u32 new_index)
554 : {
555 : clib_bihash_kv_48_8_t kv;
556 73719 : ASSERT(new_index != ~0);
557 : DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);
558 :
559 73719 : fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);
560 :
561 : DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx",
562 : kv.key[0], kv.key[1], kv.key[2],
563 : kv.key[3], kv.key[4], kv.key[5]);
564 :
565 : clib_bihash_kv_48_8_t result;
566 73719 : hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
567 73719 : int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
568 73719 : ASSERT(new_index != ~0);
569 73719 : ASSERT(new_index < vec_len((*applied_hash_aces)));
570 73719 : if (res == 0) {
571 47045 : u32 first_index = result_val->applied_entry_index;
572 47045 : ASSERT(first_index != ~0);
573 47045 : ASSERT(first_index < vec_len((*applied_hash_aces)));
574 : /* There already exists an entry or more. Append at the end. */
575 : DBG("A key already exists, with applied entry index: %d", first_index);
576 47045 : add_colliding_rule(am, applied_hash_aces, first_index, new_index);
577 47045 : return first_index;
578 : } else {
579 : /* It's the very first entry */
580 26674 : hashtable_add_del(am, &kv, 1);
581 26674 : ASSERT(new_index != ~0);
582 26674 : add_colliding_rule(am, applied_hash_aces, new_index, new_index);
583 26674 : return new_index;
584 : }
585 : }
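: /* Editorial sketch, not part of the original file; indices are
:  * illustrative. After two ACEs hash to the same masked key:
:  *
:  *   pae[5]: collision_head_ae_index = 5,
:  *           colliding_rules = [ { applied_entry_index = 5 },
:  *                               { applied_entry_index = 9 } ]
:  *   pae[9]: collision_head_ae_index = 5, colliding_rules = NULL
:  *
:  * The bihash value keeps pointing at head entry 5; entry 9 is reachable
:  * only through the head's colliding_rules vector. */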
586 :
587 :
588 : static void
589 73719 : assign_mask_type_index_to_pae(acl_main_t *am, u32 lc_index, int is_ip6, applied_hash_ace_entry_t *pae)
590 : {
591 73719 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
592 73719 : hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
593 :
594 : ace_mask_type_entry_t *mte;
595 : fa_5tuple_t mask;
596 : /*
597 : * Start from the base_mask associated with the ace and essentially copy it.
598 : * With TupleMerge we will assign a relaxed mask here.
599 : */
600 73719 : mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
601 73719 : mask = mte->mask;
602 73719 : if (am->use_tuple_merge)
603 73719 : pae->mask_type_index = tm_assign_mask_type_index(am, &mask, is_ip6, lc_index);
604 : else
605 0 : pae->mask_type_index = assign_mask_type_index(am, &mask);
606 73719 : }
607 :
608 : static void
609 : split_partition(acl_main_t *am, u32 first_index,
610 : u32 lc_index, int is_ip6);
611 :
612 :
613 : static void
614 73719 : check_collision_count_and_maybe_split(acl_main_t *am, u32 lc_index, int is_ip6, u32 first_index)
615 : {
616 73719 : applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
617 73719 : applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
618 73719 : if (vec_len(first_pae->colliding_rules) > am->tuple_merge_split_threshold) {
619 0 : split_partition(am, first_index, lc_index, is_ip6);
620 : }
621 73719 : }
622 :
623 : void
624 11966 : hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
625 : {
626 : int i;
627 :
628 : DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
629 11966 : if (!am->acl_lookup_hash_initialized) {
630 12 : BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
631 : am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
632 12 : am->acl_lookup_hash_initialized = 1;
633 : }
634 :
635 11966 : vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
636 11966 : vec_validate(am->hash_acl_infos, acl_index);
637 11966 : applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
638 :
639 11966 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
640 11966 : u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
641 :
642 11966 : int base_offset = vec_len(*applied_hash_aces);
643 :
644 : /* Update the bitmap of the mask types with which the lookup
645 : needs to happen for the ACLs applied to this lc_index */
646 11966 : applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
647 11966 : vec_validate((*applied_hash_acls), lc_index);
648 11966 : applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
649 :
650 : /* ensure the list of applied hash acls is initialized and add this acl# to it */
651 32414 : u32 index = vec_search(pal->applied_acls, acl_index);
652 11966 : if (index != ~0) {
653 0 : clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
654 : acl_index, lc_index);
655 0 : ASSERT(0);
656 0 : return;
657 : }
658 11966 : vec_add1(pal->applied_acls, acl_index);
659 20384 : u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
660 11966 : if (index2 != ~0) {
661 0 : clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
662 : acl_index, lc_index);
663 0 : ASSERT(0);
664 0 : return;
665 : }
666 11966 : vec_add1((*hash_acl_applied_lc_index), lc_index);
667 :
668 : /*
669 : * if the applied ACL is empty, the current code behaves differently
670 : * from the current linear search: an empty ACL will simply fall
671 : * through to the next ACL, or to the default deny at the end.
672 : *
673 : * This is not a problem, because after the vpp-dev discussion the
674 : * consensus was that it should not be possible to apply a non-existent
675 : * ACL, so the change adding this code also takes care of that.
676 : */
677 :
678 :
679 11966 : vec_validate(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
680 :
681 : /* since we know (in case of no split) how much we expand, preallocate that space */
682 11966 : if (vec_len(ha->rules) > 0) {
683 11966 : int old_vec_len = vec_len(*applied_hash_aces);
684 11966 : vec_validate((*applied_hash_aces), old_vec_len + vec_len(ha->rules) - 1);
685 11966 : vec_set_len ((*applied_hash_aces), old_vec_len);
686 : }
687 :
688 : /* add the rules from the ACL to the hash table for lookup and append to the vector*/
689 85685 : for(i=0; i < vec_len(ha->rules); i++) {
690 : /*
691 : * Expand the applied aces vector to fit a new entry.
692 : * One by one, so as not to upset split_partition() if it is called.
693 : */
694 73719 : vec_resize((*applied_hash_aces), 1);
695 :
696 73719 : int is_ip6 = ha->rules[i].match.pkt.is_ip6;
697 73719 : u32 new_index = base_offset + i;
698 73719 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
699 73719 : pae->acl_index = acl_index;
700 73719 : pae->ace_index = ha->rules[i].ace_index;
701 73719 : pae->acl_position = acl_position;
702 73719 : pae->action = ha->rules[i].action;
703 73719 : pae->hitcount = 0;
704 73719 : pae->hash_ace_info_index = i;
705 : /* we might link it in later */
706 73719 : pae->collision_head_ae_index = ~0;
707 73719 : pae->colliding_rules = NULL;
708 73719 : pae->mask_type_index = ~0;
709 73719 : assign_mask_type_index_to_pae(am, lc_index, is_ip6, pae);
710 73719 : u32 first_index = activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
711 73719 : if (am->use_tuple_merge)
712 73719 : check_collision_count_and_maybe_split(am, lc_index, is_ip6, first_index);
713 : }
714 11966 : remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
715 : }
716 :
717 : static u32
718 56887 : find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
719 : {
720 56887 : ASSERT(curr_index != ~0);
721 56887 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), curr_index);
722 56887 : ASSERT(pae);
723 56887 : ASSERT(pae->collision_head_ae_index != ~0);
724 56887 : return pae->collision_head_ae_index;
725 : }
726 :
727 : static void
728 29582 : set_collision_head_ae_index(applied_hash_ace_entry_t **applied_hash_aces, collision_match_rule_t *colliding_rules, u32 new_index)
729 : {
730 : collision_match_rule_t *cr;
731 77034 : vec_foreach(cr, colliding_rules) {
732 47452 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), cr->applied_entry_index);
733 47452 : pae->collision_head_ae_index = new_index;
734 : }
735 29582 : }
736 :
737 : static void
738 39424 : move_applied_ace_hash_entry(acl_main_t *am,
739 : u32 lc_index,
740 : applied_hash_ace_entry_t **applied_hash_aces,
741 : u32 old_index, u32 new_index)
742 : {
743 39424 : ASSERT(old_index != ~0);
744 39424 : ASSERT(new_index != ~0);
745 : /* move the entry */
746 39424 : *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);
747 :
748 : /* update the linkage and hash table if necessary */
749 39424 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
750 39424 : applied_hash_ace_entry_t *new_pae = vec_elt_at_index((*applied_hash_aces), new_index);
751 :
752 : if (ACL_HASH_LOOKUP_DEBUG > 0) {
753 : clib_warning("Moving pae from %d to %d", old_index, new_index);
754 : acl_plugin_print_pae(am->vlib_main, old_index, pae);
755 : }
756 :
757 39424 : if (pae->collision_head_ae_index == old_index) {
758 : /* first entry - so the hash points to it, update */
759 18346 : add_del_hashtable_entry(am, lc_index,
760 : applied_hash_aces, new_index, 1);
761 : }
762 39424 : if (new_pae->colliding_rules) {
763 : /* update the information within the collision rule entry */
764 18346 : ASSERT(vec_len(new_pae->colliding_rules) > 0);
765 18346 : collision_match_rule_t *cr = vec_elt_at_index (new_pae->colliding_rules, 0);
766 18346 : ASSERT(cr->applied_entry_index == old_index);
767 18346 : cr->applied_entry_index = new_index;
768 18346 : set_collision_head_ae_index(applied_hash_aces, new_pae->colliding_rules, new_index);
769 : } else {
770 : /* find the index in the collision rule entry on the head element */
771 21078 : u32 head_index = find_head_applied_ace_index(applied_hash_aces, new_index);
772 21078 : ASSERT(head_index != ~0);
773 21078 : applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
774 21078 : ASSERT(vec_len(head_pae->colliding_rules) > 0);
775 : u32 i;
776 71828 : for (i=0; i<vec_len(head_pae->colliding_rules); i++) {
777 50750 : collision_match_rule_t *cr = vec_elt_at_index (head_pae->colliding_rules, i);
778 50750 : if (cr->applied_entry_index == old_index) {
779 21078 : cr->applied_entry_index = new_index;
780 : }
781 : }
782 : if (ACL_HASH_LOOKUP_DEBUG > 0) {
783 : clib_warning("Head pae at index %d after adjustment", head_index);
784 : acl_plugin_print_pae(am->vlib_main, head_index, head_pae);
785 : }
786 : }
787 : /* invalidate the old entry */
788 39424 : pae->collision_head_ae_index = ~0;
789 39424 : pae->colliding_rules = NULL;
790 39424 : }
791 :
792 : static void
793 73719 : deactivate_applied_ace_hash_entry(acl_main_t *am,
794 : u32 lc_index,
795 : applied_hash_ace_entry_t **applied_hash_aces,
796 : u32 old_index)
797 : {
798 73719 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
799 : DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
800 : if (ACL_HASH_LOOKUP_DEBUG > 0) {
801 : clib_warning("Deactivating pae at index %d", old_index);
802 : acl_plugin_print_pae(am->vlib_main, old_index, pae);
803 : }
804 :
805 73719 : if (pae->collision_head_ae_index != old_index) {
806 : DBG("UNAPPLY = index %d has collision head %d", old_index, pae->collision_head_ae_index);
807 :
808 35809 : u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
809 35809 : ASSERT(head_index != ~0);
810 35809 : del_colliding_rule(applied_hash_aces, head_index, old_index);
811 :
812 : } else {
813 : /* It was the first entry. We need either to reset the hash entry or delete it */
814 : /* delete our entry from the collision vector first */
815 37910 : del_colliding_rule(applied_hash_aces, old_index, old_index);
816 49146 : if (vec_len(pae->colliding_rules) > 0) {
817 11236 : u32 next_pae_index = pae->colliding_rules[0].applied_entry_index;
818 11236 : applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), next_pae_index);
819 : /* Remove ourselves and transfer the ownership of the colliding rules vector */
820 11236 : next_pae->colliding_rules = pae->colliding_rules;
821 11236 : set_collision_head_ae_index(applied_hash_aces, next_pae->colliding_rules, next_pae_index);
822 11236 : add_del_hashtable_entry(am, lc_index,
823 : applied_hash_aces, next_pae_index, 1);
824 : } else {
825 : /* no next entry, so just delete the entry in the hash table */
826 26674 : add_del_hashtable_entry(am, lc_index,
827 : applied_hash_aces, old_index, 0);
828 : }
829 : }
830 : DBG0("Releasing mask type index %d for pae index %d on lc_index %d", pae->mask_type_index, old_index, lc_index);
831 73719 : release_mask_type_index(am, pae->mask_type_index);
832 : /* invalidate the old entry */
833 73719 : pae->mask_type_index = ~0;
834 73719 : pae->collision_head_ae_index = ~0;
835 : /* always has to be 0 */
836 73719 : pae->colliding_rules = NULL;
837 73719 : }
838 :
839 :
840 : void
841 11966 : hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
842 : {
843 : int i;
844 :
845 : DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
846 11966 : applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
847 11966 : applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
848 :
849 11966 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
850 11966 : u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
851 :
852 : if (ACL_HASH_LOOKUP_DEBUG > 0) {
853 : clib_warning("unapplying acl %d", acl_index);
854 : acl_plugin_show_tables_mask_type();
855 : acl_plugin_show_tables_acl_hash_info(acl_index);
856 : acl_plugin_show_tables_applied_info(lc_index);
857 : }
858 :
859 : /* remove this acl# from the list of applied hash acls */
860 20894 : u32 index = vec_search(pal->applied_acls, acl_index);
861 11966 : if (index == ~0) {
862 0 : clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
863 : acl_index, lc_index);
864 0 : return;
865 : }
866 11966 : vec_del1(pal->applied_acls, index);
867 :
868 13536 : u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
869 11966 : if (index2 == ~0) {
870 0 : clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
871 : acl_index, lc_index);
872 0 : return;
873 : }
874 11966 : vec_del1((*hash_acl_applied_lc_index), index2);
875 :
876 11966 : applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
877 :
878 114590 : for(i=0; i < vec_len((*applied_hash_aces)); i++) {
879 114590 : if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
880 : DBG("Found applied ACL#%d at applied index %d", acl_index, i);
881 11966 : break;
882 : }
883 : }
884 11966 : if (vec_len((*applied_hash_aces)) <= i) {
885 : DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
886 : /* we went all the way without finding any entries; the list was probably empty. */
887 0 : return;
888 : }
889 :
890 11966 : int base_offset = i;
891 11966 : int tail_offset = base_offset + vec_len(ha->rules);
892 11966 : int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
893 : DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);
894 :
895 85685 : for(i=0; i < vec_len(ha->rules); i ++) {
896 73719 : deactivate_applied_ace_hash_entry(am, lc_index,
897 73719 : applied_hash_aces, base_offset + i);
898 : }
899 51390 : for(i=0; i < tail_len; i ++) {
900 : /* move the entry at tail offset to base offset */
901 : /* that is, from (tail_offset+i) -> (base_offset+i) */
902 : DBG0("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
903 39424 : move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
904 : }
905 : /* trim the end of the vector */
906 11966 : vec_dec_len ((*applied_hash_aces), vec_len (ha->rules));
907 :
908 11966 : remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
909 :
910 11966 : if (vec_len((*applied_hash_aces)) == 0) {
911 2014 : vec_free((*applied_hash_aces));
912 : }
913 : }
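: /* Editorial sketch, not part of the original file: unapplying ACL B with
:  * two rules from an applied vector [A0 A1 B0 B1 C0 C1] yields
:  * base_offset = 2, tail_offset = 4, tail_len = 2: B0 and B1 are
:  * deactivated, C0/C1 are moved down to indices 2/3, and the vector length
:  * is decreased by 2, leaving [A0 A1 C0 C1]. */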
914 :
915 : /*
916 : * Create the applied ACEs and update the hash table,
917 : * taking into account that the ACL may not be the last
918 : * in the vector of applied ACLs.
919 : *
920 : * For now, walk from the end of the vector and unapply the ACLs,
921 : * then apply the one in question and reapply the rest.
922 : */
923 :
924 : void
925 4320 : hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
926 : {
927 4320 : acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
928 4320 : u32 **applied_acls = &acontext->acl_indices;
929 : int i;
930 9792 : int start_index = vec_search((*applied_acls), acl_index);
931 :
932 : DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
933 : /*
934 : * This function is called after we find out the lc_index where the ACL is applied.
935 : * If the by-lc_index vector does not have the ACL#, then it's a bug.
936 : */
937 4320 : ASSERT(start_index < vec_len(*applied_acls));
938 :
939 : /* unapply all the ACLs at the tail side, up to the current one */
940 10944 : for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
941 6624 : hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
942 : }
943 15264 : for(i = start_index; i < vec_len(*applied_acls); i++) {
944 10944 : hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
945 : }
946 4320 : }
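: /* Editorial example, not part of the original file: with applied ACLs
:  * [X, Y, Z] and hash_acl_reapply() called for Y (start_index = 1), the
:  * first loop above unapplies Z, and the second loop applies Y and then Z
:  * again, preserving the original ordering. */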
947 :
948 : static void
949 16496 : make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
950 : {
951 16496 : ip6_address_mask_from_width(addr, prefix_len);
952 16496 : }
953 :
954 :
955 : /* Maybe should be moved into the core somewhere */
956 : always_inline void
957 16552 : ip4_address_mask_from_width (ip4_address_t * a, u32 width)
958 : {
959 : int i, byte, bit, bitnum;
960 16552 : ASSERT (width <= 32);
961 16552 : clib_memset (a, 0, sizeof (a[0]));
962 541256 : for (i = 0; i < width; i++)
963 : {
964 524704 : bitnum = (7 - (i & 7));
965 524704 : byte = i / 8;
966 524704 : bit = 1 << bitnum;
967 524704 : a->as_u8[byte] |= bit;
968 : }
969 16552 : }
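: /* editorial example: width 24 yields a->as_u8[] = { 0xff, 0xff, 0xff, 0x00 },
:    i.e. 255.255.255.0 */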
970 :
971 :
972 : static void
973 16552 : make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
974 : {
975 16552 : ip4_address_mask_from_width(addr, prefix_len);
976 16552 : }
977 :
978 : static void
979 32936 : make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
980 : {
981 32936 : if (port_first == port_last) {
982 30209 : *portmask = 0xffff;
983 : /* a single port is representable by an exact-match mask value */
984 30209 : return;
985 : }
986 :
987 2727 : *portmask = 0;
988 2727 : return;
989 : }
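: /* Editorial examples, not part of the original file:
:  *   make_port_mask(&m, 80, 80)     -> m = 0xffff (exact match on port 80)
:  *   make_port_mask(&m, 1024, 2047) -> m = 0      (the port becomes a
:  *       wildcard in the hash key; the actual range is still checked
:  *       against the full rule kept in the collision vector) */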
990 :
991 : static void
992 16524 : make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi)
993 : {
994 16524 : clib_memset(mask, 0, sizeof(*mask));
995 16524 : clib_memset(&hi->match, 0, sizeof(hi->match));
996 16524 : hi->action = r->is_permit;
997 :
998 : /* we will need to be matching based on lc_index and mask_type_index when applied */
999 16524 : mask->pkt.lc_index = ~0;
1000 : /* we will assign the mask_type_index part of the match later, when we find it */
1001 16524 : mask->pkt.mask_type_index_lsb = ~0;
1002 :
1003 16524 : mask->pkt.is_ip6 = 1;
1004 16524 : hi->match.pkt.is_ip6 = r->is_ipv6;
1005 16524 : if (r->is_ipv6) {
1006 8248 : make_ip6_address_mask(&mask->ip6_addr[0], r->src_prefixlen);
1007 8248 : hi->match.ip6_addr[0] = r->src.ip6;
1008 8248 : make_ip6_address_mask(&mask->ip6_addr[1], r->dst_prefixlen);
1009 8248 : hi->match.ip6_addr[1] = r->dst.ip6;
1010 : } else {
1011 8276 : clib_memset(hi->match.l3_zero_pad, 0, sizeof(hi->match.l3_zero_pad));
1012 8276 : make_ip4_address_mask(&mask->ip4_addr[0], r->src_prefixlen);
1013 8276 : hi->match.ip4_addr[0] = r->src.ip4;
1014 8276 : make_ip4_address_mask(&mask->ip4_addr[1], r->dst_prefixlen);
1015 8276 : hi->match.ip4_addr[1] = r->dst.ip4;
1016 : }
1017 :
1018 16524 : if (r->proto != 0) {
1019 16468 : mask->l4.proto = ~0; /* L4 proto needs to be matched */
1020 16468 : hi->match.l4.proto = r->proto;
1021 :
1022 : /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
1023 16468 : make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
1024 16468 : hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
1025 :
1026 16468 : make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
1027 16468 : hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
1028 : /* L4 info must be valid in order to match */
1029 16468 : mask->pkt.l4_valid = 1;
1030 16468 : hi->match.pkt.l4_valid = 1;
1031 : /* And we must set the mask to check that it is an initial fragment */
1032 16468 : mask->pkt.is_nonfirst_fragment = 1;
1033 16468 : hi->match.pkt.is_nonfirst_fragment = 0;
1034 16468 : if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
1035 : /* if we want to match on TCP flags, the mask must include them as well */
1036 0 : mask->pkt.tcp_flags = r->tcp_flags_mask;
1037 0 : hi->match.pkt.tcp_flags = r->tcp_flags_value;
1038 : /* and the flags need to be present within the packet being matched */
1039 0 : mask->pkt.tcp_flags_valid = 1;
1040 0 : hi->match.pkt.tcp_flags_valid = 1;
1041 : }
1042 : }
1043 : /* Sanitize the mask and the match */
1044 16524 : u64 *pmask = (u64 *)mask;
1045 16524 : u64 *pmatch = (u64 *)&hi->match;
1046 : int j;
1047 115668 : for(j=0; j<6; j++) {
1048 99144 : pmatch[j] = pmatch[j] & pmask[j];
1049 : }
1050 16524 : }
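: /* Editorial worked example, not part of the original file, for a
:  * hypothetical IPv4 rule "permit tcp 10.0.0.0/8 -> 192.168.1.1/32,
:  * any src port, dst port 80":
:  *   mask : ip4_addr[0] = 255.0.0.0, ip4_addr[1] = 255.255.255.255,
:  *          l4.proto = 0xff, l4.port[0] = 0, l4.port[1] = 0xffff
:  *   match: ip4_addr[0] = 10.0.0.0,  ip4_addr[1] = 192.168.1.1,
:  *          l4.proto = 6 (TCP), l4.port[0] = 0, l4.port[1] = 80
:  * with pkt.l4_valid = 1 in both, and pkt.is_nonfirst_fragment set only
:  * in the mask. */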
1051 :
1052 :
1053 2585 : int hash_acl_exists(acl_main_t *am, int acl_index)
1054 : {
1055 2585 : if (acl_index >= vec_len(am->hash_acl_infos))
1056 20 : return 0;
1057 :
1058 2565 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1059 2565 : return ha->hash_acl_exists;
1060 : }
1061 :
1062 2585 : void hash_acl_add(acl_main_t *am, int acl_index)
1063 : {
1064 : DBG("HASH ACL add : %d", acl_index);
1065 : int i;
1066 2585 : acl_rule_t *acl_rules = am->acls[acl_index].rules;
1067 2585 : vec_validate(am->hash_acl_infos, acl_index);
1068 2585 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1069 2585 : clib_memset(ha, 0, sizeof(*ha));
1070 2585 : ha->hash_acl_exists = 1;
1071 :
1072 : /* walk the newly added ACL entries and ensure that for each of them there
1073 : is a mask type, incrementing the reference count for that mask type */
1074 :
1075 : /* avoid small requests by preallocating the entire vector before running the additions */
1076 2585 : if (vec_len(acl_rules) > 0) {
1077 2585 : vec_validate(ha->rules, vec_len(acl_rules)-1);
1078 2585 : vec_reset_length(ha->rules);
1079 : }
1080 :
1081 19109 : for(i=0; i < vec_len(acl_rules); i++) {
1082 : hash_ace_info_t ace_info;
1083 : fa_5tuple_t mask;
1084 16524 : clib_memset(&ace_info, 0, sizeof(ace_info));
1085 16524 : ace_info.acl_index = acl_index;
1086 16524 : ace_info.ace_index = i;
1087 :
1088 16524 : make_mask_and_match_from_rule(&mask, &acl_rules[i], &ace_info);
1089 16524 : mask.pkt.flags_reserved = 0b000;
1090 16524 : ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
1091 : /* assign the mask type index for matching itself */
1092 16524 : ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
1093 : DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
1094 16524 : vec_add1(ha->rules, ace_info);
1095 : }
1096 : /*
1097 : * if an ACL is applied somewhere, fill the corresponding lookup data structures.
1098 : * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
1099 : */
1100 2585 : if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
1101 : u32 *lc_index;
1102 6885 : vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
1103 4320 : hash_acl_reapply(am, *lc_index, acl_index);
1104 : }
1105 : }
1106 2585 : }
1107 :
1108 2585 : void hash_acl_delete(acl_main_t *am, int acl_index)
1109 : {
1110 : DBG0("HASH ACL delete : %d", acl_index);
1111 : /*
1112 : * If the ACL is applied somewhere, remove the references to it (call hash_acl_unapply).
1113 : * This is different behavior from the linear lookup, where an empty ACL means "deny all".
1114 : *
1115 : * However, following the vpp-dev discussion, an ACL that is referenced elsewhere
1116 : * should not be possible to delete, and the change adding this also adds
1117 : * safeguards to that effect, so this is not a problem.
1118 : *
1119 : * The part to remember is that this routine is called in the process of reapplication
1120 : * during the acl_add_replace() API call - the old ACL ruleset is deleted, then
1121 : * the new one is added, without any change in the applied ACLs - so this case
1122 : * has to be handled.
1123 : */
1124 2585 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1125 2585 : u32 *lc_list_copy = 0;
1126 : {
1127 : u32 *lc_index;
1128 2585 : lc_list_copy = vec_dup(ha->lc_index_list);
1129 6905 : vec_foreach(lc_index, lc_list_copy) {
1130 4320 : hash_acl_unapply(am, *lc_index, acl_index);
1131 : }
1132 2585 : vec_free(lc_list_copy);
1133 : }
1134 2585 : vec_free(ha->lc_index_list);
1135 :
1136 : /* walk the mask types for the ACL about-to-be-deleted, and decrease
1137 : * the reference count, possibly freeing up some of them */
1138 : int i;
1139 19109 : for(i=0; i < vec_len(ha->rules); i++) {
1140 16524 : release_mask_type_index(am, ha->rules[i].base_mask_type_index);
1141 : }
1142 2585 : ha->hash_acl_exists = 0;
1143 2585 : vec_free(ha->rules);
1144 2585 : }
1145 :
1146 :
1147 : void
1148 140 : show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
1149 : {
1150 140 : vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
1151 : BV (format_bihash), &am->acl_lookup_hash, verbose);
1152 140 : }
1153 :
1154 : void
1155 140 : acl_plugin_show_tables_mask_type (void)
1156 : {
1157 140 : acl_main_t *am = &acl_main;
1158 140 : vlib_main_t *vm = am->vlib_main;
1159 : ace_mask_type_entry_t *mte;
1160 :
1161 140 : vlib_cli_output (vm, "Mask-type entries:");
1162 : /* *INDENT-OFF* */
1163 362 : pool_foreach (mte, am->ace_mask_type_pool)
1164 : {
1165 222 : vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
1166 222 : mte - am->ace_mask_type_pool,
1167 : mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
1168 : mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value, mte->refcount);
1169 : }
1170 : /* *INDENT-ON* */
1171 140 : }
1172 :
1173 : void
1174 140 : acl_plugin_show_tables_acl_hash_info (u32 acl_index)
1175 : {
1176 140 : acl_main_t *am = &acl_main;
1177 140 : vlib_main_t *vm = am->vlib_main;
1178 : u32 i, j;
1179 : u64 *m;
1180 140 : vlib_cli_output (vm, "Mask-ready ACL representations\n");
1181 488 : for (i = 0; i < vec_len (am->hash_acl_infos); i++)
1182 : {
1183 348 : if ((acl_index != ~0) && (acl_index != i))
1184 : {
1185 0 : continue;
1186 : }
1187 348 : hash_acl_info_t *ha = &am->hash_acl_infos[i];
1188 348 : vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
1189 348 : vlib_cli_output (vm, " applied lc_index list: %U\n",
1190 : format_vec32, ha->lc_index_list, "%d");
1191 1357 : for (j = 0; j < vec_len (ha->rules); j++)
1192 : {
1193 1009 : hash_ace_info_t *pa = &ha->rules[j];
1194 1009 : m = (u64 *) & pa->match;
1195 1009 : vlib_cli_output (vm,
1196 : " %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
1197 1009 : j, m[0], m[1], m[2], m[3], m[4], m[5],
1198 : pa->base_mask_type_index, pa->acl_index, pa->ace_index,
1199 1009 : pa->action);
1200 : }
1201 : }
1202 140 : }
1203 :
1204 : static void
1205 1528 : acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t *cr) {
1206 1528 : vlib_cli_output(vm,
1207 : " %4d: acl %d ace %d acl pos %d pae index: %d",
1208 : j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
1209 1528 : }
1210 :
1211 : static void
1212 1528 : acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
1213 : {
1214 24 : vlib_cli_output (vm,
1215 : " %4d: acl %d rule %d action %d bitmask-ready rule %d mask type index: %d colliding_rules: %d collision_head_ae_idx %d hitcount %lld acl_pos: %d",
1216 1528 : j, pae->acl_index, pae->ace_index, pae->action,
1217 1528 : pae->hash_ace_info_index, pae->mask_type_index, vec_len(pae->colliding_rules), pae->collision_head_ae_index,
1218 : pae->hitcount, pae->acl_position);
1219 : int jj;
1220 3056 : for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
1221 1528 : acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
1222 1528 : }
1223 :
1224 : static void
1225 404 : acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
1226 : {
1227 404 : vlib_cli_output (vm,
1228 : " %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
1229 : j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
1230 404 : }
1231 :
1232 : void
1233 140 : acl_plugin_show_tables_applied_info (u32 lc_index)
1234 : {
1235 140 : acl_main_t *am = &acl_main;
1236 140 : vlib_main_t *vm = am->vlib_main;
1237 : u32 lci, j;
1238 140 : vlib_cli_output (vm, "Applied lookup entries for lookup contexts");
1239 :
1240 587 : for (lci = 0;
1241 447 : (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
1242 : {
1243 307 : if ((lc_index != ~0) && (lc_index != lci))
1244 : {
1245 0 : continue;
1246 : }
1247 307 : vlib_cli_output (vm, "lc_index %d:", lci);
1248 307 : if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
1249 : {
1250 307 : applied_hash_acl_info_t *pal =
1251 307 : &am->applied_hash_acl_info_by_lc_index[lci];
1252 307 : vlib_cli_output (vm, " applied acls: %U", format_vec32,
1253 : pal->applied_acls, "%d");
1254 : }
1255 307 : if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
1256 : {
1257 307 : vlib_cli_output (vm, " applied mask info entries:");
1258 1018 : for (j = 0;
1259 711 : j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
1260 404 : j++)
1261 : {
1262 404 : acl_plugin_print_applied_mask_info (vm, j,
1263 404 : &am->hash_applied_mask_info_vec_by_lc_index
1264 404 : [lci][j]);
1265 : }
1266 : }
1267 307 : if (lci < vec_len (am->hash_entry_vec_by_lc_index))
1268 : {
1269 307 : vlib_cli_output (vm, " lookup applied entries:");
1270 2142 : for (j = 0;
1271 1835 : j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
1272 1528 : j++)
1273 : {
1274 1528 : acl_plugin_print_pae (vm, j,
1275 1528 : &am->hash_entry_vec_by_lc_index
1276 1528 : [lci][j]);
1277 : }
1278 : }
1279 : }
1280 140 : }
1281 :
1282 : void
1283 140 : acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
1284 : {
1285 140 : acl_main_t *am = &acl_main;
1286 140 : vlib_main_t *vm = am->vlib_main;
1287 140 : show_hash_acl_hash (vm, am, show_bihash_verbose);
1288 140 : }
1289 :
1290 : /*
1291 : * A split of the partition needs to happen when the collision count
1292 : * goes over a specified threshold.
1293 : *
1294 : * This is a signal that we ignored too many bits in
1295 : * mT and we need to split the table into two tables. We select
1296 : * all of the colliding rules L and find their maximum common
1297 : * tuple mL. Normally mL is specific enough to hash L with few
1298 : * or no collisions. We then create a new table T2 with tuple mL
1299 : * and transfer all compatible rules from T to T2. If mL is not
1300 : * specific enough, we find the field with the biggest difference
1301 : * between the minimum and maximum tuple lengths for all of
1302 : * the rules in L and set that field to be the average of those two
1303 : * values. We then transfer all compatible rules as before. This
1304 : * guarantees that some rules from L will move and that T2 will
1305 : * have a smaller number of collisions than T did.
1306 : */
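: /* Editorial note, not part of the original file: the "maximum common
:  * tuple" is computed per field below, e.g. for colliding IPv4 source
:  * masks of /8 and /24 the min tuple keeps /8 and the max tuple keeps
:  * /24; the dimension with the largest count_bits(max) - count_bits(min)
:  * difference is the one the split pivots on. */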
1307 :
1308 :
1309 : static void
1310 0 : ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
1311 : {
1312 0 : int update =
1313 0 : (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
1314 0 : clib_net_to_host_u64 (min_addr->as_u64[0]))
1315 0 : ||
1316 0 : ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
1317 0 : clib_net_to_host_u64 (min_addr->as_u64[0]))
1318 0 : && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
1319 0 : clib_net_to_host_u64 (min_addr->as_u64[1])));
1320 0 : if (update)
1321 : {
1322 0 : min_addr->as_u64[0] = mask_addr->as_u64[0];
1323 0 : min_addr->as_u64[1] = mask_addr->as_u64[1];
1324 : }
1325 0 : }
1326 :
1327 : static void
1328 0 : ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
1329 : {
1330 0 : int update =
1331 0 : (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
1332 0 : clib_net_to_host_u64 (max_addr->as_u64[0]))
1333 0 : ||
1334 0 : ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
1335 0 : clib_net_to_host_u64 (max_addr->as_u64[0]))
1336 0 : && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
1337 0 : clib_net_to_host_u64 (max_addr->as_u64[1])));
1338 0 : if (update)
1339 : {
1340 0 : max_addr->as_u64[0] = mask_addr->as_u64[0];
1341 0 : max_addr->as_u64[1] = mask_addr->as_u64[1];
1342 : }
1343 0 : }
1344 :
1345 : static void
1346 0 : ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
1347 : {
1348 0 : int update =
1349 0 : (clib_net_to_host_u32 (mask_addr->as_u32) <
1350 0 : clib_net_to_host_u32 (min_addr->as_u32));
1351 0 : if (update)
1352 0 : min_addr->as_u32 = mask_addr->as_u32;
1353 0 : }
1354 :
1355 : static void
1356 0 : ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
1357 : {
1358 0 : int update =
1359 0 : (clib_net_to_host_u32 (mask_addr->as_u32) >
1360 0 : clib_net_to_host_u32 (max_addr->as_u32));
1361 0 : if (update)
1362 0 : max_addr->as_u32 = mask_addr->as_u32;
1363 0 : }
1364 :
1365 : enum {
1366 : DIM_SRC_ADDR = 0,
1367 : DIM_DST_ADDR,
1368 : DIM_SRC_PORT,
1369 : DIM_DST_PORT,
1370 : DIM_PROTO,
1371 : };
1372 :
1373 :
1374 :
1375 : static void
1376 0 : split_partition(acl_main_t *am, u32 first_index,
1377 : u32 lc_index, int is_ip6){
1378 : DBG( "TM-split_partition - first_entry:%d", first_index);
1379 0 : applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
1380 : ace_mask_type_entry_t *mte;
1381 0 : fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
1382 0 : fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
1383 0 : applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), first_index);
1384 0 : hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
1385 : hash_ace_info_t *ace_info;
1386 0 : u32 coll_mask_type_index = pae->mask_type_index;
1387 0 : clib_memset(&the_min_tuple, 0, sizeof(the_min_tuple));
1388 0 : clib_memset(&the_max_tuple, 0, sizeof(the_max_tuple));
1389 :
1390 0 : int i=0;
1391 0 : collision_match_rule_t *colliding_rules = pae->colliding_rules;
1392 0 : u64 collisions = vec_len(pae->colliding_rules);
1393 0 : for(i=0; i<collisions; i++){
1394 : /* reload the hash acl info as it might be a different ACL# */
1395 0 : pae = vec_elt_at_index((*applied_hash_aces), colliding_rules[i].applied_entry_index);
1396 0 : ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
1397 :
1398 : DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
1399 : pae->ace_index, pae->mask_type_index, coll_mask_type_index);
1400 :
1401 0 : ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
1402 0 : mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
1403 0 : fa_5tuple_t *mask = &mte->mask;
1404 :
1405 0 : if(pae->mask_type_index != coll_mask_type_index) continue;
1406 : /* Computing min_mask and max_mask for colliding rules */
1407 0 : if(i==0){
1408 0 : clib_memcpy_fast(min_tuple, mask, sizeof(fa_5tuple_t));
1409 0 : clib_memcpy_fast(max_tuple, mask, sizeof(fa_5tuple_t));
1410 : }else{
1411 : int j;
1412 0 : for(j=0; j<2; j++){
1413 0 : if (is_ip6)
1414 0 : ensure_ip6_min_addr(&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
1415 : else
1416 0 : ensure_ip4_min_addr(&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);
1417 :
1418 0 : if ((mask->l4.port[j] < min_tuple->l4.port[j]))
1419 0 : min_tuple->l4.port[j] = mask->l4.port[j];
1420 : }
1421 :
1422 0 : if ((mask->l4.proto < min_tuple->l4.proto))
1423 0 : min_tuple->l4.proto = mask->l4.proto;
1424 :
1425 0 : if(mask->pkt.as_u64 < min_tuple->pkt.as_u64)
1426 0 : min_tuple->pkt.as_u64 = mask->pkt.as_u64;
1427 :
1428 :
1429 0 : for(j=0; j<2; j++){
1430 0 : if (is_ip6)
1431 0 : ensure_ip6_max_addr(&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
1432 : else
1433 0 : ensure_ip4_max_addr(&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);
1434 :
1435 0 : if ((mask->l4.port[j] > max_tuple->l4.port[j]))
1436 0 : max_tuple->l4.port[j] = mask->l4.port[j];
1437 : }
1438 :
1439 0 : if ((mask->l4.proto > max_tuple->l4.proto))
1440 0 : max_tuple->l4.proto = mask->l4.proto;
1441 :
1442 0 : if(mask->pkt.as_u64 > max_tuple->pkt.as_u64)
1443 0 : max_tuple->pkt.as_u64 = mask->pkt.as_u64;
1444 : }
1445 : }
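     :
     : /* min_tuple/max_tuple now hold, per field, the numerically
     :    smallest and largest masks among the colliding rules; for
     :    prefix-style masks, the most general and the most specific
     :    mask respectively. */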
1446 :
1447 : /* Compute the field with the largest bit-count difference between the min and max masks */
1448 0 : int best_dim=-1, best_delta=0, delta=0;
1449 :
1450 : /* SRC_addr dimension */
1451 0 : if (is_ip6) {
1452 : int i;
1453 0 : for(i=0; i<2; i++){
1454 0 : delta += count_bits(max_tuple->ip6_addr[0].as_u64[i]) - count_bits(min_tuple->ip6_addr[0].as_u64[i]);
1455 : }
1456 : } else {
1457 0 : delta += count_bits(max_tuple->ip4_addr[0].as_u32) - count_bits(min_tuple->ip4_addr[0].as_u32);
1458 : }
1459 0 : if(delta > best_delta){
1460 0 : best_delta = delta;
1461 0 : best_dim = DIM_SRC_ADDR;
1462 : }
1463 :
1464 : /* DST_addr dimension */
1465 0 : delta = 0;
1466 0 : if (is_ip6) {
1467 : int i;
1468 0 : for(i=0; i<2; i++){
1469 0 : delta += count_bits(max_tuple->ip6_addr[1].as_u64[i]) - count_bits(min_tuple->ip6_addr[1].as_u64[i]);
1470 : }
1471 : } else {
1472 0 : delta += count_bits(max_tuple->ip4_addr[1].as_u32) - count_bits(min_tuple->ip4_addr[1].as_u32);
1473 : }
1474 0 : if(delta > best_delta){
1475 0 : best_delta = delta;
1476 0 : best_dim = DIM_DST_ADDR;
1477 : }
1478 :
1479 : /* SRC_port dimension */
1480 0 : delta = count_bits(max_tuple->l4.port[0]) - count_bits(min_tuple->l4.port[0]);
1481 0 : if(delta > best_delta){
1482 0 : best_delta = delta;
1483 0 : best_dim = DIM_SRC_PORT;
1484 : }
1485 :
1486 : /* DST_port dimension */
1487 0 : delta = count_bits(max_tuple->l4.port[1]) - count_bits(min_tuple->l4.port[1]);
1488 0 : if(delta > best_delta){
1489 0 : best_delta = delta;
1490 0 : best_dim = DIM_DST_PORT;
1491 : }
1492 :
1493 : /* Proto dimension */
1494 0 : delta = count_bits(max_tuple->l4.proto) - count_bits(min_tuple->l4.proto);
1495 0 : if(delta > best_delta){
1496 0 : best_delta = delta;
1497 0 : best_dim = DIM_PROTO;
1498 : }
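     :
     : /* best_dim is now the field with the widest spread in mask
     :    specificity (e.g. port masks 0xffff vs 0xff00 give a delta of
     :    8 bits). If every delta is zero, best_dim stays -1 and the
     :    default branch below falls back to relax_tuple(). */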
1499 :
1500 0 : int shifting = 0; //, ipv4_block = 0;
1501 0 : switch(best_dim){
1502 0 : case DIM_SRC_ADDR:
1503 0 : shifting = (best_delta)/2; // FIXME: IPv4-only, the IPv6 source address is not relaxed here
1504 : // ipv4_block = count_bits(max_tuple->ip4_addr[0].as_u32);
1505 0 : min_tuple->ip4_addr[0].as_u32 =
1506 0 : clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[0].as_u32) << (shifting))&0xFFFFFFFF);
1507 :
1508 0 : break;
1509 0 : case DIM_DST_ADDR:
1510 0 : shifting = (best_delta)/2;
1511 : /*
1512 : ipv4_block = count_bits(max_tuple->addr[1].as_u64[1]);
1513 : if(ipv4_block > shifting)
1514 : min_tuple->addr[1].as_u64[1] =
1515 : clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[1]) << (shifting))&0xFFFFFFFF);
1516 : else{
1517 : shifting = shifting - ipv4_block;
1518 : min_tuple->addr[1].as_u64[1] = 0;
1519 : min_tuple->addr[1].as_u64[0] =
1520 : clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[0]) << (shifting))&0xFFFFFFFF);
1521 : }
1522 : */
1523 0 : min_tuple->ip4_addr[1].as_u32 =
1524 0 : clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[1].as_u32) << (shifting))&0xFFFFFFFF);
1525 :
1526 0 : break;
1527 0 : case DIM_SRC_PORT: min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta)/2;
1528 0 : break;
1529 0 : case DIM_DST_PORT: min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta)/2;
1530 0 : break;
1531 0 : case DIM_PROTO: min_tuple->l4.proto = max_tuple->l4.proto << (best_delta)/2;
1532 0 : break;
1533 0 : default: relax_tuple(min_tuple, is_ip6, 1);
1534 0 : break;
1535 : }
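     :
     : /* Left-shifting the most specific mask by delta/2 fills that many
     :    low-order bits with zeroes, so the chosen field of min_tuple
     :    now matches fewer bits: the new partition mask is more general. */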
1536 :
1537 0 : min_tuple->pkt.is_nonfirst_fragment = 0;
1538 0 : u32 new_mask_type_index = assign_mask_type_index(am, min_tuple);
1539 :
1540 0 : hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
1541 :
1542 : hash_applied_mask_info_t *minfo;
1543 : //search the ordered vector to see if mask_type_index is already there
1544 : int search;
1545 0 : for (search=0; search < vec_len((*hash_applied_mask_info_vec)); search++){
1546 0 : minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
1547 0 : if(minfo->mask_type_index == new_mask_type_index)
1548 0 : break;
1549 : }
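     :
     : /* If the loop fell through, search == vec_len and vec_validate
     :    grows the vector by one slot; either way, minfo below refers
     :    to the slot for new_mask_type_index. */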
1550 :
1551 0 : vec_validate((*hash_applied_mask_info_vec), search);
1552 0 : minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
1553 0 : minfo->mask_type_index = new_mask_type_index;
1554 0 : minfo->num_entries = 0;
1555 0 : minfo->max_collisions = 0;
1556 0 : minfo->first_rule_index = ~0;
1557 :
1558 : DBG( "TM-split_partition - mask type index-assigned!! -> %d", new_mask_type_index);
1559 :
1560 0 : if(coll_mask_type_index == new_mask_type_index){
1561 : //vlib_cli_output(vm, "TM-There are collisions over threshold, but I'm not able to split! %d %d", coll_mask_type_index, new_mask_type_index);
1562 0 : return;
1563 : }
1564 :
1565 :
1566 : /* populate new partition */
1567 : DBG( "TM-Populate new partition");
1568 0 : u32 r_ace_index = first_index;
1569 0 : int repopulate_count = 0;
1570 :
1571 0 : collision_match_rule_t *temp_colliding_rules = vec_dup(colliding_rules);
1572 0 : collisions = vec_len(temp_colliding_rules);
1573 :
1574 0 : for(i=0; i<collisions; i++){
1575 :
1576 0 : r_ace_index = temp_colliding_rules[i].applied_entry_index;
1577 :
1578 0 : applied_hash_ace_entry_t *pop_pae = vec_elt_at_index((*applied_hash_aces), r_ace_index);
1579 0 : ha = vec_elt_at_index(am->hash_acl_infos, pop_pae->acl_index);
1580 : DBG( "TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
1581 : pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);
1582 :
1583 0 : ASSERT(pop_pae->mask_type_index == coll_mask_type_index);
1584 :
1585 0 : ace_info = vec_elt_at_index(ha->rules, pop_pae->hash_ace_info_index);
1586 0 : mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
1587 : //can this rule be inserted under the new, more general mask?
1588 : //mte = vec_elt_at_index(am->ace_mask_type_pool, pop_pae->mask_type_index);
1589 0 : fa_5tuple_t *pop_mask = &mte->mask;
1590 :
1591 0 : if(!first_mask_contains_second_mask(is_ip6, min_tuple, pop_mask)) continue;
1592 : DBG( "TM-new partition can insert -> applied_ace:%d", r_ace_index);
1593 :
1594 : //delete the entry and re-insert it under the new mask
1595 0 : deactivate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
1596 :
1597 : /* insert the new entry */
1598 0 : pop_pae->mask_type_index = new_mask_type_index;
1599 : /* The very first repopulation gets the lock by virtue of a new mask being created above */
1600 0 : if (++repopulate_count > 1)
1601 0 : lock_mask_type_index(am, new_mask_type_index);
1602 :
1603 0 : activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
1604 :
1605 : }
1606 0 : vec_free(temp_colliding_rules);
1607 :
1608 : DBG( "TM-Populate new partition-END");
1609 : DBG( "TM-split_partition - END");
1610 :
1611 : }