Line data Source code
1 : /*
2 : * Copyright (c) 2017 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : /**
17 : * @file
18 : * @brief IPv4 Shallow Virtual Reassembly.
19 : *
20 : * This file contains the source code for IPv4 Shallow Virtual reassembly.
21 : */
22 :
23 : #include <vppinfra/vec.h>
24 : #include <vnet/vnet.h>
25 : #include <vnet/ip/ip.h>
26 : #include <vnet/ip/ip4_to_ip6.h>
27 : #include <vppinfra/fifo.h>
28 : #include <vppinfra/bihash_16_8.h>
29 : #include <vnet/ip/reass/ip4_sv_reass.h>
30 :
/* Time conversion and tunable defaults for IPv4 shallow virtual reassembly. */
#define MSEC_PER_SEC 1000
/* a reassembly context is considered stale this long after its last fragment */
#define IP4_SV_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_SV_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
#define IP4_SV_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_SV_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
/* load factor used when sizing the bihash table */
#define IP4_SV_REASS_HT_LOAD_FACTOR (0.75)

/* Result codes returned by ip4_sv_reass_update() */
typedef enum
{
  IP4_SV_REASS_RC_OK,
  IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS,
  IP4_SV_REASS_RC_UNSUPP_IP_PROTO,
} ip4_sv_reass_rc_t;
44 :
/*
 * Hash table key identifying one reassembly:
 * (fib_index, src, dst, frag_id, proto) packed into two u64s so it can
 * overlay the 16-byte key of a clib_bihash_16_8_t entry.
 */
typedef struct
{
  union
  {
    struct
    {
      u32 fib_index;
      ip4_address_t src;
      ip4_address_t dst;
      u16 frag_id;
      u8 proto;
      u8 unused;	// padding so the key fills exactly 16 bytes
    };
    u64 as_u64[2];
  };
} ip4_sv_reass_key_t;

/* Hash table value: where the reassembly context lives. */
typedef union
{
  struct
  {
    u32 reass_index;	// index into the owning thread's context pool
    u32 thread_index;	// worker thread that owns the context
  };
  u64 as_u64;
} ip4_sv_reass_val_t;

/* Convenience overlay of key + value onto a clib_bihash_kv_16_8_t. */
typedef union
{
  struct
  {
    ip4_sv_reass_key_t k;
    ip4_sv_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_sv_reass_kv_t;
81 :
/* One shallow-virtual reassembly context. */
typedef struct
{
  // hash table key
  ip4_sv_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // trace operation counter
  u32 trace_op_counter;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // buffer indexes of buffers in this reassembly in chronological order -
  // including overlaps and duplicate fragments
  u32 *cached_buffers;
  // set to true when this reassembly is completed
  bool is_complete;
  // ip protocol
  u8 ip_proto;
  // ICMP type or TCP flags harvested from the first fragment
  u8 icmp_type_or_tcp_flags;
  // TCP ack/seq numbers from the first fragment (meaningful for TCP only)
  u32 tcp_ack_number;
  u32 tcp_seq_number;
  // l4 src port
  u16 l4_src_port;
  // l4 dst port
  u16 l4_dst_port;
  // next node index used by the "custom" node variants
  u32 next_index;
  // lru indexes
  u32 lru_prev;
  u32 lru_next;
} ip4_sv_reass_t;
113 :
/* Per-worker-thread reassembly state; guarded by its own spinlock. */
typedef struct
{
  ip4_sv_reass_t *pool;	// pool of contexts owned by this thread
  u32 reass_n;		// number of active contexts in the pool
  u32 id_counter;	// source for per-thread-unique reassembly ids
  clib_spinlock_t lock;
  // lru indexes
  u32 lru_first;
  u32 lru_last;

} ip4_sv_reass_per_thread_t;
125 :
/* Global state for the IPv4 shallow virtual reassembly feature. */
typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  f64 timeout;			// timeout_ms converted to seconds
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;	// key -> (thread index, pool index)
  // per-thread data
  ip4_sv_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  u32 ip4_sv_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_feature_index;
  u32 fq_custom_context_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;

  // reference count for enabling/disabling output feature - per interface
  u32 *output_feature_use_refcount_per_intf;

} ip4_sv_reass_main_t;
162 :
/* Singleton instance; defined exactly once (non-march-variant build). */
extern ip4_sv_reass_main_t ip4_sv_reass_main;

#ifndef CLIB_MARCH_VARIANT
ip4_sv_reass_main_t ip4_sv_reass_main;
#endif /* CLIB_MARCH_VARIANT */
168 :
/* Next-node indices used by the reassembly graph nodes. */
typedef enum
{
  IP4_SV_REASSEMBLY_NEXT_INPUT,
  IP4_SV_REASSEMBLY_NEXT_DROP,
  IP4_SV_REASSEMBLY_NEXT_HANDOFF,
  IP4_SV_REASSEMBLY_N_NEXT,
} ip4_sv_reass_next_t;

/* What happened to a packet, recorded for tracing. */
typedef enum
{
  REASS_FRAGMENT_CACHE,		// fragment buffered awaiting the first fragment
  REASS_FINISH,			// first fragment seen - L4 info harvested
  REASS_FRAGMENT_FORWARD,	// fragment forwarded with inherited L4 info
  REASS_PASSTHROUGH,		// packet was not fragmented
} ip4_sv_reass_trace_operation_e;

/* Per-packet trace record rendered by format_ip4_sv_reass_trace(). */
typedef struct
{
  ip4_sv_reass_trace_operation_e action;
  u32 reass_id;
  u32 op_id;
  u8 ip_proto;
  u16 l4_src_port;
  u16 l4_dst_port;
  int l4_layer_truncated;
} ip4_sv_reass_trace_t;

extern vlib_node_registration_t ip4_sv_reass_node;
extern vlib_node_registration_t ip4_sv_reass_node_feature;
198 :
199 : static u8 *
200 7197 : format_ip4_sv_reass_trace (u8 * s, va_list * args)
201 : {
202 7197 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
203 7197 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
204 7197 : ip4_sv_reass_trace_t *t = va_arg (*args, ip4_sv_reass_trace_t *);
205 7197 : if (REASS_PASSTHROUGH != t->action)
206 : {
207 680 : s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
208 : }
209 7197 : switch (t->action)
210 : {
211 37 : case REASS_FRAGMENT_CACHE:
212 37 : s = format (s, "[cached]");
213 37 : break;
214 273 : case REASS_FINISH:
215 : s =
216 546 : format (s, "[finish, ip proto=%u, src_port=%u, dst_port=%u]",
217 273 : t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
218 273 : clib_net_to_host_u16 (t->l4_dst_port));
219 273 : break;
220 370 : case REASS_FRAGMENT_FORWARD:
221 : s =
222 740 : format (s, "[forward, ip proto=%u, src_port=%u, dst_port=%u]",
223 370 : t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
224 370 : clib_net_to_host_u16 (t->l4_dst_port));
225 370 : break;
226 6517 : case REASS_PASSTHROUGH:
227 6517 : s = format (s, "[not-fragmented]");
228 6517 : break;
229 : }
230 7197 : if (t->l4_layer_truncated)
231 : {
232 0 : s = format (s, " [l4-layer-truncated]");
233 : }
234 7197 : return s;
235 : }
236 :
237 : static void
238 28025 : ip4_sv_reass_add_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
239 : ip4_sv_reass_t *reass, u32 bi,
240 : ip4_sv_reass_trace_operation_e action, u32 ip_proto,
241 : u16 l4_src_port, u16 l4_dst_port,
242 : int l4_layer_truncated)
243 : {
244 28025 : vlib_buffer_t *b = vlib_get_buffer (vm, bi);
245 28004 : if (pool_is_free_index
246 28116 : (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
247 : {
248 : // this buffer's trace is gone
249 0 : b->flags &= ~VLIB_BUFFER_IS_TRACED;
250 0 : return;
251 : }
252 28004 : ip4_sv_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
253 27660 : if (reass)
254 : {
255 1654 : t->reass_id = reass->id;
256 1654 : t->op_id = reass->trace_op_counter;
257 1654 : ++reass->trace_op_counter;
258 : }
259 27660 : t->action = action;
260 27660 : t->ip_proto = ip_proto;
261 27660 : t->l4_src_port = l4_src_port;
262 27660 : t->l4_dst_port = l4_dst_port;
263 27660 : t->l4_layer_truncated = l4_layer_truncated;
264 : #if 0
265 : static u8 *s = NULL;
266 : s = format (s, "%U", format_ip4_sv_reass_trace, NULL, NULL, t);
267 : printf ("%.*s\n", vec_len (s), s);
268 : fflush (stdout);
269 : vec_reset_length (s);
270 : #endif
271 : }
272 :
273 :
274 : always_inline void
275 666 : ip4_sv_reass_free (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
276 : ip4_sv_reass_per_thread_t * rt, ip4_sv_reass_t * reass)
277 : {
278 : clib_bihash_kv_16_8_t kv;
279 666 : kv.key[0] = reass->key.as_u64[0];
280 666 : kv.key[1] = reass->key.as_u64[1];
281 666 : clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
282 666 : vlib_buffer_free (vm, reass->cached_buffers,
283 666 : vec_len (reass->cached_buffers));
284 666 : vec_free (reass->cached_buffers);
285 666 : reass->cached_buffers = NULL;
286 666 : if (~0 != reass->lru_prev)
287 : {
288 26 : ip4_sv_reass_t *lru_prev =
289 26 : pool_elt_at_index (rt->pool, reass->lru_prev);
290 26 : lru_prev->lru_next = reass->lru_next;
291 : }
292 666 : if (~0 != reass->lru_next)
293 : {
294 624 : ip4_sv_reass_t *lru_next =
295 624 : pool_elt_at_index (rt->pool, reass->lru_next);
296 624 : lru_next->lru_prev = reass->lru_prev;
297 : }
298 666 : if (rt->lru_first == reass - rt->pool)
299 : {
300 640 : rt->lru_first = reass->lru_next;
301 : }
302 666 : if (rt->lru_last == reass - rt->pool)
303 : {
304 42 : rt->lru_last = reass->lru_prev;
305 : }
306 666 : pool_put (rt->pool, reass);
307 666 : --rt->reass_n;
308 666 : }
309 :
310 : always_inline void
311 669 : ip4_sv_reass_init (ip4_sv_reass_t * reass)
312 : {
313 669 : reass->cached_buffers = NULL;
314 669 : reass->is_complete = false;
315 669 : }
316 :
/*
 * Look up the reassembly context for @a kv, creating one if none exists.
 *
 * On a lookup hit owned by a different worker, *do_handoff is set and NULL
 * is returned. A hit older than the configured timeout is freed and a fresh
 * context created in its place. When the per-thread pool is at capacity the
 * least-recently-used context is evicted. Caller must hold rt->lock.
 */
always_inline ip4_sv_reass_t *
ip4_sv_reass_find_or_create (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
			     ip4_sv_reass_per_thread_t * rt,
			     ip4_sv_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_sv_reass_t *reass = NULL;
  f64 now = vlib_time_now (vm);

again:

  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
    {
      // entry exists, but it may be owned by another worker thread
      if (vm->thread_index != kv->v.thread_index)
	{
	  *do_handoff = 1;
	  return NULL;
	}
      reass = pool_elt_at_index (rt->pool, kv->v.reass_index);

      // stale context - free it and fall through to create a new one
      if (now > reass->last_heard + rm->timeout)
	{
	  ip4_sv_reass_free (vm, rm, rt, reass);
	  reass = NULL;
	}
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  // pool at capacity (0 == unlimited) - evict the least recently used entry
  if (rt->reass_n >= rm->max_reass_n && rm->max_reass_n)
    {
      reass = pool_elt_at_index (rt->pool, rt->lru_first);
      ip4_sv_reass_free (vm, rm, rt, reass);
    }

  pool_get (rt->pool, reass);
  clib_memset (reass, 0, sizeof (*reass));
  // ids are unique across threads: thread index scaled beyond counter range
  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
  ++rt->id_counter;
  ip4_sv_reass_init (reass);
  ++rt->reass_n;
  reass->lru_prev = reass->lru_next = ~0;

  // append the new context at the tail of the LRU list
  if (~0 != rt->lru_last)
    {
      ip4_sv_reass_t *lru_last = pool_elt_at_index (rt->pool, rt->lru_last);
      reass->lru_prev = rt->lru_last;
      lru_last->lru_next = rt->lru_last = reass - rt->pool;
    }

  if (~0 == rt->lru_first)
    {
      rt->lru_first = rt->lru_last = reass - rt->pool;
    }

  reass->key.as_u64[0] = kv->kv.key[0];
  reass->key.as_u64[1] = kv->kv.key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.thread_index = vm->thread_index;
  reass->last_heard = now;

  // insert; a racing insert by another worker is reported via rv below
  int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
  if (rv)
    {
      ip4_sv_reass_free (vm, rm, rt, reass);
      reass = NULL;
      // if other worker created a context already work with the other copy
      if (-2 == rv)
	goto again;
    }

  return reass;
}
393 :
/*
 * Account for fragment @a bi0 in reassembly @a reass.
 *
 * The first fragment (offset 0) carries the L4 header, so it supplies the
 * ports plus TCP flags/seq/ack or ICMP type, and marks the reassembly
 * complete. Every fragment is appended to the cached buffer list; the
 * caller flushes the cache once the reassembly is complete.
 */
always_inline ip4_sv_reass_rc_t
ip4_sv_reass_update (vlib_main_t *vm, vlib_node_runtime_t *node,
		     ip4_sv_reass_main_t *rm, ip4_header_t *ip0,
		     ip4_sv_reass_t *reass, u32 bi0)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  ip4_sv_reass_rc_t rc = IP4_SV_REASS_RC_OK;
  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
  if (0 == fragment_first)
    {
      // first fragment - harvest L4 information
      reass->ip_proto = ip0->protocol;
      reass->l4_src_port = ip4_get_port (ip0, 1);
      reass->l4_dst_port = ip4_get_port (ip0, 0);
      // a zero port means the ports could not be parsed - unsupported proto
      if (!reass->l4_src_port || !reass->l4_dst_port)
	return IP4_SV_REASS_RC_UNSUPP_IP_PROTO;
      if (IP_PROTOCOL_TCP == reass->ip_proto)
	{
	  reass->icmp_type_or_tcp_flags = ((tcp_header_t *) (ip0 + 1))->flags;
	  reass->tcp_ack_number = ((tcp_header_t *) (ip0 + 1))->ack_number;
	  reass->tcp_seq_number = ((tcp_header_t *) (ip0 + 1))->seq_number;
	}
      else if (IP_PROTOCOL_ICMP == reass->ip_proto)
	{
	  reass->icmp_type_or_tcp_flags =
	    ((icmp46_header_t *) (ip0 + 1))->type;
	}
      reass->is_complete = true;
      vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_sv_reass_add_trace (
	    vm, node, reass, bi0, REASS_FINISH, reass->ip_proto,
	    reass->l4_src_port, reass->l4_dst_port,
	    vnet_buffer (b0)->ip.reass.l4_layer_truncated);
	}
    }
  // cache every fragment - including overlaps/duplicates - for later flush
  vec_add1 (reass->cached_buffers, bi0);
  if (!reass->is_complete)
    {
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_sv_reass_add_trace (
	    vm, node, reass, bi0, REASS_FRAGMENT_CACHE, ~0, ~0, ~0,
	    vnet_buffer (b0)->ip.reass.l4_layer_truncated);
	}
      if (vec_len (reass->cached_buffers) > rm->max_reass_len)
	{
	  rc = IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS;
	}
    }
  return rc;
}
446 :
447 : always_inline int
448 44478 : l4_layer_truncated (ip4_header_t *ip)
449 : {
450 : static const int l4_layer_length[256] = {
451 : [IP_PROTOCOL_TCP] = sizeof (tcp_header_t),
452 : [IP_PROTOCOL_UDP] = sizeof (udp_header_t),
453 : [IP_PROTOCOL_ICMP] = sizeof (icmp46_header_t),
454 : };
455 :
456 88954 : return ((u8 *) ip + ip4_header_bytes (ip) + l4_layer_length[ip->protocol] >
457 44467 : (u8 *) ip + clib_net_to_host_u16 (ip->length));
458 : }
459 :
/*
 * Shared worker for all shallow-virtual-reassembly node variants.
 *
 * Fast path: as long as no fragment is seen, packets are annotated with
 * their own L4 info (vnet_buffer ip.reass fields) and forwarded in bulk.
 * On the first fragment encountered, everything processed so far is
 * enqueued and processing restarts in the slow path, which maintains
 * per-flow reassembly contexts under the per-thread spinlock.
 *
 * Template flags:
 *  is_feature           - next node comes from the feature arc
 *  is_output_feature    - ip header sits after the saved rewrite
 *  is_custom            - next node taken from ip.reass.next_index
 *  with_custom_context  - per-packet aux data replaces the fib index in
 *                         the flow key and follows packets on handoff
 */
always_inline uword
ip4_sv_reass_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		     vlib_frame_t *frame, bool is_feature,
		     bool is_output_feature, bool is_custom,
		     bool with_custom_context)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, *to_next_aux, next_index;
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
  u32 *context;
  if (with_custom_context)
    context = vlib_frame_aux_args (frame);

  clib_spinlock_lock (&rt->lock);

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  vlib_get_buffers (vm, from, bufs, n_left_from);
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  b = bufs;

  /* optimistic case first - no fragments */
  while (n_left_from >= 2)
    {
      vlib_buffer_t *b0, *b1;
      u32 next0, next1;
      b0 = *b;
      b++;
      b1 = *b;
      b++;

      /* Prefetch next iteration. */
      if (PREDICT_TRUE (n_left_from >= 4))
	{
	  vlib_buffer_t *p2, *p3;

	  p2 = *b;
	  p3 = *(b + 1);

	  vlib_prefetch_buffer_header (p2, LOAD);
	  vlib_prefetch_buffer_header (p3, LOAD);

	  clib_prefetch_load (p2->data);
	  clib_prefetch_load (p3->data);
	}

      /* for output features the ip header sits after the saved rewrite */
      ip4_header_t *ip0 =
	(ip4_header_t *) u8_ptr_add (vlib_buffer_get_current (b0),
				     (is_output_feature ? 1 : 0) *
				     vnet_buffer (b0)->
				     ip.save_rewrite_length);
      ip4_header_t *ip1 =
	(ip4_header_t *) u8_ptr_add (vlib_buffer_get_current (b1),
				     (is_output_feature ? 1 : 0) *
				     vnet_buffer (b1)->
				     ip.save_rewrite_length);

      if (PREDICT_FALSE
	  (ip4_get_fragment_more (ip0) || ip4_get_fragment_offset (ip0))
	  || (ip4_get_fragment_more (ip1) || ip4_get_fragment_offset (ip1)))
	{
	  // fragment found, go slow path
	  b -= 2;
	  if (b - bufs > 0)
	    {
	      /* flush everything processed so far before switching modes */
	      vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
					   b - bufs);
	    }
	  goto slow_path;
	}
      if (is_feature)
	{
	  vnet_feature_next (&next0, b0);
	}
      else
	{
	  next0 = is_custom ? vnet_buffer (b0)->ip.reass.next_index :
	    IP4_SV_REASSEMBLY_NEXT_INPUT;
	}
      /* annotate the buffer with its own (non-reassembled) L4 info */
      vnet_buffer (b0)->ip.reass.is_non_first_fragment = 0;
      vnet_buffer (b0)->ip.reass.ip_proto = ip0->protocol;
      if (l4_layer_truncated (ip0))
	{
	  vnet_buffer (b0)->ip.reass.l4_layer_truncated = 1;
	  vnet_buffer (b0)->ip.reass.l4_src_port = 0;
	  vnet_buffer (b0)->ip.reass.l4_dst_port = 0;
	}
      else
	{
	  vnet_buffer (b0)->ip.reass.l4_layer_truncated = 0;
	  if (IP_PROTOCOL_TCP == ip0->protocol)
	    {
	      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
		((tcp_header_t *) (ip0 + 1))->flags;
	      vnet_buffer (b0)->ip.reass.tcp_ack_number =
		((tcp_header_t *) (ip0 + 1))->ack_number;
	      vnet_buffer (b0)->ip.reass.tcp_seq_number =
		((tcp_header_t *) (ip0 + 1))->seq_number;
	    }
	  else if (IP_PROTOCOL_ICMP == ip0->protocol)
	    {
	      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
		((icmp46_header_t *) (ip0 + 1))->type;
	    }
	  vnet_buffer (b0)->ip.reass.l4_src_port = ip4_get_port (ip0, 1);
	  vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
	}
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_sv_reass_add_trace (
	    vm, node, NULL, from[(b - 2) - bufs], REASS_PASSTHROUGH,
	    vnet_buffer (b0)->ip.reass.ip_proto,
	    vnet_buffer (b0)->ip.reass.l4_src_port,
	    vnet_buffer (b0)->ip.reass.l4_dst_port,
	    vnet_buffer (b0)->ip.reass.l4_layer_truncated);
	}
      if (is_feature)
	{
	  vnet_feature_next (&next1, b1);
	}
      else
	{
	  next1 = is_custom ? vnet_buffer (b1)->ip.reass.next_index :
	    IP4_SV_REASSEMBLY_NEXT_INPUT;
	}
      /* same annotation for the second packet of the pair */
      vnet_buffer (b1)->ip.reass.is_non_first_fragment = 0;
      vnet_buffer (b1)->ip.reass.ip_proto = ip1->protocol;
      if (l4_layer_truncated (ip1))
	{
	  vnet_buffer (b1)->ip.reass.l4_layer_truncated = 1;
	  vnet_buffer (b1)->ip.reass.l4_src_port = 0;
	  vnet_buffer (b1)->ip.reass.l4_dst_port = 0;
	}
      else
	{
	  vnet_buffer (b1)->ip.reass.l4_layer_truncated = 0;
	  if (IP_PROTOCOL_TCP == ip1->protocol)
	    {
	      vnet_buffer (b1)->ip.reass.icmp_type_or_tcp_flags =
		((tcp_header_t *) (ip1 + 1))->flags;
	      vnet_buffer (b1)->ip.reass.tcp_ack_number =
		((tcp_header_t *) (ip1 + 1))->ack_number;
	      vnet_buffer (b1)->ip.reass.tcp_seq_number =
		((tcp_header_t *) (ip1 + 1))->seq_number;
	    }
	  else if (IP_PROTOCOL_ICMP == ip1->protocol)
	    {
	      vnet_buffer (b1)->ip.reass.icmp_type_or_tcp_flags =
		((icmp46_header_t *) (ip1 + 1))->type;
	    }
	  vnet_buffer (b1)->ip.reass.l4_src_port = ip4_get_port (ip1, 1);
	  vnet_buffer (b1)->ip.reass.l4_dst_port = ip4_get_port (ip1, 0);
	}
      if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_sv_reass_add_trace (
	    vm, node, NULL, from[(b - 1) - bufs], REASS_PASSTHROUGH,
	    vnet_buffer (b1)->ip.reass.ip_proto,
	    vnet_buffer (b1)->ip.reass.l4_src_port,
	    vnet_buffer (b1)->ip.reass.l4_dst_port,
	    vnet_buffer (b1)->ip.reass.l4_layer_truncated);
	}

      n_left_from -= 2;
      next[0] = next0;
      next[1] = next1;
      next += 2;
      if (with_custom_context)
	context += 2;
    }

  /* single-packet tail of the fast path */
  while (n_left_from > 0)
    {
      vlib_buffer_t *b0;
      u32 next0;
      b0 = *b;
      b++;

      ip4_header_t *ip0 =
	(ip4_header_t *) u8_ptr_add (vlib_buffer_get_current (b0),
				     (is_output_feature ? 1 : 0) *
				     vnet_buffer (b0)->
				     ip.save_rewrite_length);
      if (PREDICT_FALSE
	  (ip4_get_fragment_more (ip0) || ip4_get_fragment_offset (ip0)))
	{
	  // fragment found, go slow path
	  b -= 1;
	  if (b - bufs > 0)
	    {
	      vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
					   b - bufs);
	    }
	  goto slow_path;
	}
      if (is_feature)
	{
	  vnet_feature_next (&next0, b0);
	}
      else
	{
	  next0 =
	    is_custom ? vnet_buffer (b0)->ip.
	    reass.next_index : IP4_SV_REASSEMBLY_NEXT_INPUT;
	}
      vnet_buffer (b0)->ip.reass.is_non_first_fragment = 0;
      vnet_buffer (b0)->ip.reass.ip_proto = ip0->protocol;
      if (l4_layer_truncated (ip0))
	{
	  vnet_buffer (b0)->ip.reass.l4_layer_truncated = 1;
	}
      else
	{
	  vnet_buffer (b0)->ip.reass.l4_layer_truncated = 0;
	  if (IP_PROTOCOL_TCP == ip0->protocol)
	    {
	      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
		((tcp_header_t *) (ip0 + 1))->flags;
	      vnet_buffer (b0)->ip.reass.tcp_ack_number =
		((tcp_header_t *) (ip0 + 1))->ack_number;
	      vnet_buffer (b0)->ip.reass.tcp_seq_number =
		((tcp_header_t *) (ip0 + 1))->seq_number;
	    }
	  else if (IP_PROTOCOL_ICMP == ip0->protocol)
	    {
	      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
		((icmp46_header_t *) (ip0 + 1))->type;
	    }
	  vnet_buffer (b0)->ip.reass.l4_src_port = ip4_get_port (ip0, 1);
	  vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
	}
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_sv_reass_add_trace (
	    vm, node, NULL, from[(b - 1) - bufs], REASS_PASSTHROUGH,
	    vnet_buffer (b0)->ip.reass.ip_proto,
	    vnet_buffer (b0)->ip.reass.l4_src_port,
	    vnet_buffer (b0)->ip.reass.l4_dst_port,
	    vnet_buffer (b0)->ip.reass.l4_layer_truncated);
	}

      n_left_from -= 1;
      next[0] = next0;
      next += 1;
      if (with_custom_context)
	context += 1;
    }

  /* fast path handled the whole frame - enqueue it in bulk */
  vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
			       frame->n_vectors);

  goto done;

slow_path:

  /* skip over the packets already flushed by the fast path */
  from += b - bufs;

  while (n_left_from > 0)
    {
      if (with_custom_context)
	vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,
					   to_next_aux, n_left_to_next);
      else
	vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 next0;
	  u32 error0 = IP4_ERROR_NONE;
	  u8 forward_context = 0;

	  bi0 = from[0];
	  b0 = vlib_get_buffer (vm, bi0);

	  ip4_header_t *ip0 =
	    (ip4_header_t *) u8_ptr_add (vlib_buffer_get_current (b0),
					 (is_output_feature ? 1 : 0) *
					 vnet_buffer (b0)->
					 ip.save_rewrite_length);
	  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
	    {
	      // this is a regular packet - no fragmentation
	      if (is_custom)
		{
		  next0 = vnet_buffer (b0)->ip.reass.next_index;
		}
	      else
		{
		  next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
		}
	      vnet_buffer (b0)->ip.reass.is_non_first_fragment = 0;
	      vnet_buffer (b0)->ip.reass.ip_proto = ip0->protocol;
	      if (l4_layer_truncated (ip0))
		{
		  vnet_buffer (b0)->ip.reass.l4_layer_truncated = 1;
		  vnet_buffer (b0)->ip.reass.l4_src_port = 0;
		  vnet_buffer (b0)->ip.reass.l4_dst_port = 0;
		}
	      else
		{
		  vnet_buffer (b0)->ip.reass.l4_layer_truncated = 0;
		  if (IP_PROTOCOL_TCP == ip0->protocol)
		    {
		      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
			((tcp_header_t *) (ip0 + 1))->flags;
		      vnet_buffer (b0)->ip.reass.tcp_ack_number =
			((tcp_header_t *) (ip0 + 1))->ack_number;
		      vnet_buffer (b0)->ip.reass.tcp_seq_number =
			((tcp_header_t *) (ip0 + 1))->seq_number;
		    }
		  else if (IP_PROTOCOL_ICMP == ip0->protocol)
		    {
		      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
			((icmp46_header_t *) (ip0 + 1))->type;
		    }
		  vnet_buffer (b0)->ip.reass.l4_src_port =
		    ip4_get_port (ip0, 1);
		  vnet_buffer (b0)->ip.reass.l4_dst_port =
		    ip4_get_port (ip0, 0);
		}
	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  ip4_sv_reass_add_trace (
		    vm, node, NULL, bi0, REASS_PASSTHROUGH,
		    vnet_buffer (b0)->ip.reass.ip_proto,
		    vnet_buffer (b0)->ip.reass.l4_src_port,
		    vnet_buffer (b0)->ip.reass.l4_dst_port,
		    vnet_buffer (b0)->ip.reass.l4_layer_truncated);
		}
	      goto packet_enqueue;
	    }
	  /* sanity-check the fragment geometry before touching state */
	  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
	  const u32 fragment_length =
	    clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
	  const u32 fragment_last = fragment_first + fragment_length - 1;
	  if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
	    {
	      next0 = IP4_SV_REASSEMBLY_NEXT_DROP;
	      error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
	      b0->error = node->errors[error0];
	      goto packet_enqueue;
	    }
	  /* flow key: (fib index or custom context, src, dst, frag id, proto) */
	  ip4_sv_reass_kv_t kv;
	  u8 do_handoff = 0;

	  if (with_custom_context)
	    kv.k.as_u64[0] = (u64) *context | (u64) ip0->src_address.as_u32
	      << 32;
	  else
	    kv.k.as_u64[0] =
	      (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
			     vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
	      (u64) ip0->src_address.as_u32 << 32;
	  kv.k.as_u64[1] = (u64) ip0->dst_address.as_u32 |
	    (u64) ip0->fragment_id << 32 |
	    (u64) ip0->protocol << 48;

	  ip4_sv_reass_t *reass =
	    ip4_sv_reass_find_or_create (vm, rm, rt, &kv, &do_handoff);

	  if (PREDICT_FALSE (do_handoff))
	    {
	      /* context belongs to another worker - hand the packet off */
	      next0 = IP4_SV_REASSEMBLY_NEXT_HANDOFF;
	      vnet_buffer (b0)->ip.reass.owner_thread_index =
		kv.v.thread_index;
	      if (with_custom_context)
		forward_context = 1;
	      goto packet_enqueue;
	    }

	  if (!reass)
	    {
	      next0 = IP4_SV_REASSEMBLY_NEXT_DROP;
	      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
	      b0->error = node->errors[error0];
	      goto packet_enqueue;
	    }

	  if (reass->is_complete)
	    {
	      /* L4 info already known - forward immediately with it */
	      if (is_custom)
		{
		  next0 = vnet_buffer (b0)->ip.reass.next_index;
		}
	      else
		{
		  next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
		}
	      vnet_buffer (b0)->ip.reass.is_non_first_fragment =
		! !fragment_first;
	      vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
	      vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
		reass->icmp_type_or_tcp_flags;
	      vnet_buffer (b0)->ip.reass.tcp_ack_number =
		reass->tcp_ack_number;
	      vnet_buffer (b0)->ip.reass.tcp_seq_number =
		reass->tcp_seq_number;
	      vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
	      vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  ip4_sv_reass_add_trace (
		    vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
		    reass->ip_proto, reass->l4_src_port, reass->l4_dst_port,
		    vnet_buffer (b0)->ip.reass.l4_layer_truncated);
		}
	      goto packet_enqueue;
	    }

	  ip4_sv_reass_rc_t rc =
	    ip4_sv_reass_update (vm, node, rm, ip0, reass, bi0);
	  u32 counter = ~0;
	  switch (rc)
	    {
	    case IP4_SV_REASS_RC_OK:
	      /* nothing to do here */
	      break;
	    case IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS:
	      counter = IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
	      break;
	    case IP4_SV_REASS_RC_UNSUPP_IP_PROTO:
	      counter = IP4_ERROR_REASS_UNSUPP_IP_PROT;
	      break;
	    }
	  if (~0 != counter)
	    {
	      vlib_node_increment_counter (vm, node->node_index, counter, 1);
	      ip4_sv_reass_free (vm, rm, rt, reass);
	      goto next_packet;
	    }
	  if (reass->is_complete)
	    {
	      /* first fragment arrived - flush all cached fragments */
	      u32 idx;
	      vec_foreach_index (idx, reass->cached_buffers)
	      {
		u32 bi0 = vec_elt (reass->cached_buffers, idx);
		vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
		ip0 =
		  (ip4_header_t *) u8_ptr_add (vlib_buffer_get_current (b0),
					       (is_output_feature ? 1 : 0) *
					       vnet_buffer (b0)->
					       ip.save_rewrite_length);
		u32 next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
		if (is_feature)
		  {
		    vnet_feature_next (&next0, b0);
		  }
		if (is_custom)
		  {
		    next0 = vnet_buffer (b0)->ip.reass.next_index;
		  }
		if (0 == n_left_to_next)
		  {
		    vlib_put_next_frame (vm, node, next_index,
					 n_left_to_next);
		    vlib_get_next_frame (vm, node, next_index, to_next,
					 n_left_to_next);
		  }
		to_next[0] = bi0;
		to_next += 1;
		n_left_to_next -= 1;
		vnet_buffer (b0)->ip.reass.is_non_first_fragment =
		  ! !ip4_get_fragment_offset (ip0);
		vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
		vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
		  reass->icmp_type_or_tcp_flags;
		vnet_buffer (b0)->ip.reass.tcp_ack_number =
		  reass->tcp_ack_number;
		vnet_buffer (b0)->ip.reass.tcp_seq_number =
		  reass->tcp_seq_number;
		vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
		vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
		if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		  {
		    ip4_sv_reass_add_trace (
		      vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
		      reass->ip_proto, reass->l4_src_port, reass->l4_dst_port,
		      vnet_buffer (b0)->ip.reass.l4_layer_truncated);
		  }
		vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						 to_next, n_left_to_next, bi0,
						 next0);
	      }
	      vec_set_len (reass->cached_buffers,
			   0);	// buffers are owned by frame now
	    }
	  goto next_packet;

	packet_enqueue:
	  to_next[0] = bi0;
	  to_next += 1;
	  n_left_to_next -= 1;
	  if (is_feature && IP4_ERROR_NONE == error0)
	    {
	      b0 = vlib_get_buffer (vm, bi0);
	      vnet_feature_next (&next0, b0);
	    }
	  if (with_custom_context && forward_context)
	    {
	      /* custom context travels with the packet across the handoff */
	      if (to_next_aux)
		{
		  to_next_aux[0] = *context;
		  to_next_aux += 1;
		}
	      vlib_validate_buffer_enqueue_with_aux_x1 (
		vm, node, next_index, to_next, to_next_aux, n_left_to_next,
		bi0, *context, next0);
	    }
	  else
	    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					     n_left_to_next, bi0, next0);

	next_packet:
	  from += 1;
	  n_left_from -= 1;
	  if (with_custom_context)
	    context += 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

done:
  clib_spinlock_unlock (&rt->lock);
  return frame->n_vectors;
}
991 :
/**
 * @brief Plain (non-feature) shallow-virtual reassembly node entry point.
 *
 * Macro-generated dispatcher; all work is done in ip4_sv_reass_inline
 * with every specialization flag off.
 */
VLIB_NODE_FN (ip4_sv_reass_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * frame)
{
  return ip4_sv_reass_inline (
    vm, node, frame, false /* is_feature */, false /* is_output_feature */,
    false /* is_custom */, false /* with_custom_context */);
}
1000 :
/* *INDENT-OFF* */
/* Graph-node registration for the plain shallow-virtual reassembly path. */
VLIB_REGISTER_NODE (ip4_sv_reass_node) = {
  .name = "ip4-sv-reassembly",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_sv_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
  .next_nodes =
    {
      [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
      [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
      [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-handoff",

    },
};
/* *INDENT-ON* */
1018 :
/**
 * @brief Input-feature-arc variant of the shallow-virtual reassembly node.
 */
VLIB_NODE_FN (ip4_sv_reass_node_feature) (vlib_main_t * vm,
					  vlib_node_runtime_t * node,
					  vlib_frame_t * frame)
{
  return ip4_sv_reass_inline (
    vm, node, frame, true /* is_feature */, false /* is_output_feature */,
    false /* is_custom */, false /* with_custom_context */);
}
1027 :
/* *INDENT-OFF* */
/* Registration for the feature-arc variant; hands off via the
 * feature-specific handoff node. */
VLIB_REGISTER_NODE (ip4_sv_reass_node_feature) = {
  .name = "ip4-sv-reassembly-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_sv_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
  .next_nodes =
    {
      [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
      [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
      [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reass-feature-hoff",
    },
};
/* *INDENT-ON* */
1044 :
/* *INDENT-OFF* */
/* Hook the feature node into the ip4-unicast arc, before ip4-lookup. */
VNET_FEATURE_INIT (ip4_sv_reass_feature) = {
  .arc_name = "ip4-unicast",
  .node_name = "ip4-sv-reassembly-feature",
  .runs_before = VNET_FEATURES ("ip4-lookup"),
  .runs_after = 0,
};
/* *INDENT-ON* */
1053 :
/**
 * @brief Output-feature-arc variant of the shallow-virtual reassembly node.
 */
VLIB_NODE_FN (ip4_sv_reass_node_output_feature) (vlib_main_t * vm,
						 vlib_node_runtime_t * node,
						 vlib_frame_t * frame)
{
  return ip4_sv_reass_inline (
    vm, node, frame, true /* is_feature */, true /* is_output_feature */,
    false /* is_custom */, false /* with_custom_context */);
}
1062 :
1063 :
/* *INDENT-OFF* */
/* Registration for the output-feature variant. */
VLIB_REGISTER_NODE (ip4_sv_reass_node_output_feature) = {
  .name = "ip4-sv-reassembly-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_sv_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
  .next_nodes =
    {
      [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
      [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
      [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reass-feature-hoff",
    },
};
/* *INDENT-ON* */
1080 :
/* *INDENT-OFF* */
/* Hook the output-feature node into the ip4-output arc. */
VNET_FEATURE_INIT (ip4_sv_reass_output_feature) = {
  .arc_name = "ip4-output",
  .node_name = "ip4-sv-reassembly-output-feature",
  .runs_before = 0,
  .runs_after = 0,
};
/* *INDENT-ON* */
1089 :
/* Registration for the custom-next variant: callers register their own
 * next node via ip4_sv_reass_custom_register_next_node(). */
VLIB_REGISTER_NODE (ip4_sv_reass_custom_node) = {
  .name = "ip4-sv-reassembly-custom-next",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_sv_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
  .next_nodes =
    {
      [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
      [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
      [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-handoff",

    },
};
1105 :
/**
 * @brief Custom-next variant of the shallow-virtual reassembly node.
 */
VLIB_NODE_FN (ip4_sv_reass_custom_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * frame)
{
  return ip4_sv_reass_inline (
    vm, node, frame, false /* is_feature */, false /* is_output_feature */,
    true /* is_custom */, false /* with_custom_context */);
}
1114 :
/* Registration for the custom-context variant; carries a u32 of caller
 * context per buffer in the frame's aux data. */
VLIB_REGISTER_NODE (ip4_sv_reass_custom_context_node) = {
  .name = "ip4-sv-reassembly-custom-context",
  .vector_size = sizeof (u32),
  .aux_size = sizeof(u32),
  .format_trace = format_ip4_sv_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
  .next_nodes =
    {
      [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
      [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
      [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-custom-context-handoff",

    },
};
1131 :
/**
 * @brief Custom-next + custom-context variant of the reassembly node.
 */
VLIB_NODE_FN (ip4_sv_reass_custom_context_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip4_sv_reass_inline (
    vm, node, frame, false /* is_feature */, false /* is_output_feature */,
    true /* is_custom */, true /* with_custom_context */);
}
1139 :
1140 : #ifndef CLIB_MARCH_VARIANT
1141 : always_inline u32
1142 583 : ip4_sv_reass_get_nbuckets ()
1143 : {
1144 583 : ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1145 : u32 nbuckets;
1146 : u8 i;
1147 :
1148 583 : nbuckets = (u32) (rm->max_reass_n / IP4_SV_REASS_HT_LOAD_FACTOR);
1149 :
1150 6974 : for (i = 0; i < 31; i++)
1151 6974 : if ((1 << i) >= nbuckets)
1152 583 : break;
1153 583 : nbuckets = 1 << i;
1154 :
1155 583 : return nbuckets;
1156 : }
1157 : #endif /* CLIB_MARCH_VARIANT */
1158 :
/** Events understood by the expire-walk process node. */
typedef enum
{
  IP4_EVENT_CONFIG_CHANGED = 1,
} ip4_sv_reass_event_t;

/** Walk context used when copying entries into a resized hash table. */
typedef struct
{
  int failure;			/* set when an insert into new_hash fails */
  clib_bihash_16_8_t *new_hash;	/* replacement table being populated */
} ip4_rehash_cb_ctx;
1169 :
1170 : #ifndef CLIB_MARCH_VARIANT
1171 : static int
1172 1 : ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
1173 : {
1174 1 : ip4_rehash_cb_ctx *ctx = _ctx;
1175 1 : if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
1176 : {
1177 0 : ctx->failure = 1;
1178 : }
1179 1 : return (BIHASH_WALK_CONTINUE);
1180 : }
1181 :
1182 : static void
1183 571 : ip4_sv_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1184 : u32 max_reassembly_length,
1185 : u32 expire_walk_interval_ms)
1186 : {
1187 571 : ip4_sv_reass_main.timeout_ms = timeout_ms;
1188 571 : ip4_sv_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1189 571 : ip4_sv_reass_main.max_reass_n = max_reassemblies;
1190 571 : ip4_sv_reass_main.max_reass_len = max_reassembly_length;
1191 571 : ip4_sv_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1192 571 : }
1193 :
/**
 * @brief Change reassembly parameters at runtime (API/CLI entry point).
 *
 * Applies the new parameters, wakes the expiration walker, and grows the
 * lookup hash if the new reassembly limit needs more buckets (the hash
 * is never shrunk).
 *
 * @return 0 on success, -1 if copying entries into the larger hash failed.
 */
vnet_api_error_t
ip4_sv_reass_set (u32 timeout_ms, u32 max_reassemblies,
		  u32 max_reassembly_length, u32 expire_walk_interval_ms)
{
  u32 old_nbuckets = ip4_sv_reass_get_nbuckets ();
  ip4_sv_reass_set_params (timeout_ms, max_reassemblies,
			   max_reassembly_length, expire_walk_interval_ms);
  /* wake the expire-walk process so it re-reads the configuration */
  vlib_process_signal_event (ip4_sv_reass_main.vlib_main,
			     ip4_sv_reass_main.ip4_sv_reass_expire_node_idx,
			     IP4_EVENT_CONFIG_CHANGED, 0);
  u32 new_nbuckets = ip4_sv_reass_get_nbuckets ();
  if (ip4_sv_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
    {
      /* grow path: build a larger table, copy every entry, then swap */
      clib_bihash_16_8_t new_hash;
      clib_memset (&new_hash, 0, sizeof (new_hash));
      ip4_rehash_cb_ctx ctx;
      ctx.failure = 0;
      ctx.new_hash = &new_hash;
      clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
			     new_nbuckets * 1024);
      clib_bihash_foreach_key_value_pair_16_8 (&ip4_sv_reass_main.hash,
					       ip4_rehash_cb, &ctx);
      if (ctx.failure)
	{
	  /* NOTE(review): at this point the new parameters have already been
	   * applied and the event signalled; only the resize is rolled back
	   * here — confirm this partial-failure behavior is intended */
	  clib_bihash_free_16_8 (&new_hash);
	  return -1;
	}
      else
	{
	  clib_bihash_free_16_8 (&ip4_sv_reass_main.hash);
	  clib_memcpy_fast (&ip4_sv_reass_main.hash, &new_hash,
			    sizeof (ip4_sv_reass_main.hash));
	  clib_bihash_copied (&ip4_sv_reass_main.hash, &new_hash);
	}
    }
  return 0;
}
1231 :
1232 : vnet_api_error_t
1233 0 : ip4_sv_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1234 : u32 * max_reassembly_length, u32 * expire_walk_interval_ms)
1235 : {
1236 0 : *timeout_ms = ip4_sv_reass_main.timeout_ms;
1237 0 : *max_reassemblies = ip4_sv_reass_main.max_reass_n;
1238 0 : *max_reassembly_length = ip4_sv_reass_main.max_reass_len;
1239 0 : *expire_walk_interval_ms = ip4_sv_reass_main.expire_walk_interval_ms;
1240 0 : return 0;
1241 : }
1242 :
/**
 * @brief One-time module initialization.
 *
 * Allocates per-thread reassembly state, applies default parameters,
 * creates the lookup hash, and sets up the handoff frame queues.
 */
static clib_error_t *
ip4_sv_reass_init_function (vlib_main_t * vm)
{
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
  clib_error_t *error = 0;
  u32 nbuckets;
  vlib_node_t *node;

  rm->vlib_main = vm;
  rm->vnet_main = vnet_get_main ();

  /* one state entry per worker plus the main thread */
  vec_validate (rm->per_thread_data, vlib_num_workers ());
  ip4_sv_reass_per_thread_t *rt;
  vec_foreach (rt, rm->per_thread_data)
  {
    clib_spinlock_init (&rt->lock);
    pool_alloc (rt->pool, rm->max_reass_n);
    rt->lru_first = rt->lru_last = ~0;	/* empty LRU list */
  }

  node = vlib_get_node_by_name (vm, (u8 *) "ip4-sv-reassembly-expire-walk");
  ASSERT (node);
  rm->ip4_sv_reass_expire_node_idx = node->index;

  ip4_sv_reass_set_params (IP4_SV_REASS_TIMEOUT_DEFAULT_MS,
			   IP4_SV_REASS_MAX_REASSEMBLIES_DEFAULT,
			   IP4_SV_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
			   IP4_SV_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);

  nbuckets = ip4_sv_reass_get_nbuckets ();
  clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);

  node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
  ASSERT (node);
  rm->ip4_drop_idx = node->index;

  /* frame queues used by the handoff nodes to move buffers across threads */
  rm->fq_index = vlib_frame_queue_main_init (ip4_sv_reass_node.index, 0);
  rm->fq_feature_index =
    vlib_frame_queue_main_init (ip4_sv_reass_node_feature.index, 0);
  rm->fq_custom_context_index =
    vlib_frame_queue_main_init (ip4_sv_reass_custom_context_node.index, 0);

  rm->feature_use_refcount_per_intf = NULL;
  rm->output_feature_use_refcount_per_intf = NULL;

  return error;
}
1290 :
/* run ip4_sv_reass_init_function during VPP startup */
VLIB_INIT_FUNCTION (ip4_sv_reass_init_function);
1292 : #endif /* CLIB_MARCH_VARIANT */
1293 :
/**
 * @brief Process node: periodically reap expired reassembly contexts.
 *
 * Sleeps for expire_walk_interval_ms (or until a config-change event),
 * then walks every thread's pool under that thread's spinlock and frees
 * contexts not heard from within the configured timeout. Never returns.
 */
static uword
ip4_sv_reass_walk_expired (vlib_main_t *vm,
			   CLIB_UNUSED (vlib_node_runtime_t *node),
			   CLIB_UNUSED (vlib_frame_t *f))
{
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
  uword event_type, *event_data = 0;

  while (true)
    {
      vlib_process_wait_for_event_or_clock (vm,
					    (f64)
					    rm->expire_walk_interval_ms /
					    (f64) MSEC_PER_SEC);
      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case ~0:
	  /* no events => timeout */
	  /* fallthrough */
	case IP4_EVENT_CONFIG_CHANGED:
	  /* nothing to do here */
	  break;
	default:
	  clib_warning ("BUG: event type 0x%wx", event_type);
	  break;
	}
      f64 now = vlib_time_now (vm);

      ip4_sv_reass_t *reass;
      int *pool_indexes_to_free = NULL;

      uword thread_index = 0;
      int index;
      const uword nthreads = vlib_num_workers () + 1;
      for (thread_index = 0; thread_index < nthreads; ++thread_index)
	{
	  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
	  clib_spinlock_lock (&rt->lock);

	  /* collect indices first, then free: avoids mutating the pool
	   * while pool_foreach_index iterates over it */
	  vec_reset_length (pool_indexes_to_free);
	  /* *INDENT-OFF* */
	  pool_foreach_index (index, rt->pool) {
	    reass = pool_elt_at_index (rt->pool, index);
	    if (now > reass->last_heard + rm->timeout)
	      {
		vec_add1 (pool_indexes_to_free, index);
	      }
	  }
	  /* *INDENT-ON* */
	  int *i;
	  /* *INDENT-OFF* */
	  vec_foreach (i, pool_indexes_to_free)
	  {
	    ip4_sv_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
	    ip4_sv_reass_free (vm, rm, rt, reass);
	  }
	  /* *INDENT-ON* */

	  clib_spinlock_unlock (&rt->lock);
	}

      vec_free (pool_indexes_to_free);
      if (event_data)
	{
	  vec_set_len (event_data, 0);
	}
    }

  return 0;
}
1366 :
/* *INDENT-OFF* */
/* Register the expiration walker as a process node. NOTE(review):
 * format_trace/error fields on a process node appear unused — confirm. */
VLIB_REGISTER_NODE (ip4_sv_reass_expire_node) = {
  .function = ip4_sv_reass_walk_expired,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "ip4-sv-reassembly-expire-walk",
  .format_trace = format_ip4_sv_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
};
/* *INDENT-ON* */
1377 :
/**
 * @brief Format helper: print one reassembly lookup key.
 */
static u8 *
format_ip4_sv_reass_key (u8 * s, va_list * args)
{
  ip4_sv_reass_key_t *key = va_arg (*args, ip4_sv_reass_key_t *);
  s =
    format (s, "fib_index: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
	    key->fib_index, format_ip4_address, &key->src, format_ip4_address,
	    &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
  return s;
}
1388 :
/**
 * @brief Format helper: print one reassembly context and its cached buffers.
 *
 * Walks each cached buffer's next_buffer chain so chained segments are
 * listed individually.
 */
static u8 *
format_ip4_sv_reass (u8 * s, va_list * args)
{
  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
  ip4_sv_reass_t *reass = va_arg (*args, ip4_sv_reass_t *);

  s = format (s, "ID: %lu, key: %U trace_op_counter: %u\n",
	      reass->id, format_ip4_sv_reass_key, &reass->key,
	      reass->trace_op_counter);

  vlib_buffer_t *b;
  u32 *bip;
  u32 counter = 0;
  vec_foreach (bip, reass->cached_buffers)
  {
    u32 bi = *bip;
    do
      {
	b = vlib_get_buffer (vm, bi);
	s = format (s, " #%03u: bi: %u, ", counter, bi);
	++counter;
	bi = b->next_buffer;
      }
    while (b->flags & VLIB_BUFFER_NEXT_PRESENT);
  }
  return s;
}
1416 :
/**
 * @brief CLI handler for "show ip4-sv-reassembly [details]".
 *
 * Prints configured limits and the total number of active reassemblies,
 * walking each thread's pool under its spinlock; with "details", dumps
 * every active reassembly context.
 */
static clib_error_t *
show_ip4_reass (vlib_main_t * vm,
		unformat_input_t * input,
		CLIB_UNUSED (vlib_cli_command_t * lmd))
{
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;

  vlib_cli_output (vm, "---------------------");
  vlib_cli_output (vm, "IP4 reassembly status");
  vlib_cli_output (vm, "---------------------");
  bool details = false;
  if (unformat (input, "details"))
    {
      details = true;
    }

  u32 sum_reass_n = 0;
  ip4_sv_reass_t *reass;
  uword thread_index;
  const uword nthreads = vlib_num_workers () + 1;
  for (thread_index = 0; thread_index < nthreads; ++thread_index)
    {
      ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
      clib_spinlock_lock (&rt->lock);
      if (details)
	{
	  /* *INDENT-OFF* */
	  pool_foreach (reass, rt->pool) {
	    vlib_cli_output (vm, "%U", format_ip4_sv_reass, vm, reass);
	  }
	  /* *INDENT-ON* */
	}
      sum_reass_n += rt->reass_n;
      clib_spinlock_unlock (&rt->lock);
    }
  vlib_cli_output (vm, "---------------------");
  vlib_cli_output (vm, "Current IP4 reassemblies count: %lu\n",
		   (long unsigned) sum_reass_n);
  vlib_cli_output (vm,
		   "Maximum configured concurrent shallow virtual IP4 reassemblies per worker-thread: %lu\n",
		   (long unsigned) rm->max_reass_n);
  vlib_cli_output (vm,
		   "Maximum configured amount of fragments per shallow "
		   "virtual IP4 reassembly: %lu\n",
		   (long unsigned) rm->max_reass_len);
  vlib_cli_output (vm,
		   "Maximum configured shallow virtual IP4 reassembly timeout: %lums\n",
		   (long unsigned) rm->timeout_ms);
  vlib_cli_output (vm,
		   "Maximum configured shallow virtual IP4 reassembly expire walk interval: %lums\n",
		   (long unsigned) rm->expire_walk_interval_ms);
  return 0;
}
1470 :
/* *INDENT-OFF* */
/* CLI registration for the status/dump command above. */
VLIB_CLI_COMMAND (show_ip4_sv_reass_cmd, static) = {
  .path = "show ip4-sv-reassembly",
  .short_help = "show ip4-sv-reassembly [details]",
  .function = show_ip4_reass,
};
/* *INDENT-ON* */
1478 :
1479 : #ifndef CLIB_MARCH_VARIANT
/**
 * @brief Public enable/disable wrapper; delegates to the refcounted helper.
 */
vnet_api_error_t
ip4_sv_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
{
  return ip4_sv_reass_enable_disable_with_refcnt (sw_if_index,
						  enable_disable);
}
1486 : #endif /* CLIB_MARCH_VARIANT */
1487 :
1488 :
/* Errors specific to the handoff nodes (separate from the IP4 counters). */
#define foreach_ip4_sv_reass_handoff_error                       \
_(CONGESTION_DROP, "congestion drop")


typedef enum
{
#define _(sym,str) IP4_SV_REASSEMBLY_HANDOFF_ERROR_##sym,
  foreach_ip4_sv_reass_handoff_error
#undef _
    IP4_SV_REASSEMBLY_HANDOFF_N_ERROR,
} ip4_sv_reass_handoff_error_t;

static char *ip4_sv_reass_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_ip4_sv_reass_handoff_error
#undef _
};

/** Per-packet trace record for the handoff nodes. */
typedef struct
{
  u32 next_worker_index;	/* thread the buffer was handed off to */
} ip4_sv_reass_handoff_trace_t;
1511 :
1512 : static u8 *
1513 0 : format_ip4_sv_reass_handoff_trace (u8 * s, va_list * args)
1514 : {
1515 0 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1516 0 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1517 0 : ip4_sv_reass_handoff_trace_t *t =
1518 : va_arg (*args, ip4_sv_reass_handoff_trace_t *);
1519 :
1520 : s =
1521 0 : format (s, "ip4-sv-reassembly-handoff: next-worker %d",
1522 : t->next_worker_index);
1523 :
1524 0 : return s;
1525 : }
1526 :
/**
 * @brief Hand off buffers to the thread that owns their reassembly context.
 *
 * Reads the precomputed owner_thread_index from each buffer's opaque data,
 * then enqueues the whole frame to the matching frame queue; buffers that
 * cannot be enqueued are counted as congestion drops.
 */
always_inline uword
ip4_sv_reass_handoff_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
				  vlib_frame_t *frame, bool is_feature,
				  bool is_custom_context)
{
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u32 n_enq, n_left_from, *from, *context;
  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
  u32 fq_index;

  from = vlib_frame_vector_args (frame);
  if (is_custom_context)
    context = vlib_frame_aux_args (frame);

  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  b = bufs;
  ti = thread_indices;

  /* pick the frame queue matching this node variant */
  fq_index = (is_feature) ? rm->fq_feature_index :
			    (is_custom_context ? rm->fq_custom_context_index :
						 rm->fq_index);

  while (n_left_from > 0)
    {
      ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;

      if (PREDICT_FALSE
	  ((node->flags & VLIB_NODE_FLAG_TRACE)
	   && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
	{
	  ip4_sv_reass_handoff_trace_t *t =
	    vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->next_worker_index = ti[0];
	}

      n_left_from -= 1;
      ti += 1;
      b += 1;
    }
  if (is_custom_context)
    n_enq = vlib_buffer_enqueue_to_thread_with_aux (
      vm, node, fq_index, from, context, thread_indices, frame->n_vectors, 1);
  else
    n_enq = vlib_buffer_enqueue_to_thread (
      vm, node, fq_index, from, thread_indices, frame->n_vectors, 1);

  /* anything not enqueued was dropped due to congestion */
  if (n_enq < frame->n_vectors)
    vlib_node_increment_counter (vm, node->node_index,
				 IP4_SV_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
				 frame->n_vectors - n_enq);
  return frame->n_vectors;
}
1583 :
/**
 * @brief Handoff node for the plain reassembly path.
 */
VLIB_NODE_FN (ip4_sv_reass_handoff_node) (vlib_main_t * vm,
					  vlib_node_runtime_t * node,
					  vlib_frame_t * frame)
{
  return ip4_sv_reass_handoff_node_inline (
    vm, node, frame, false /* is_feature */, false /* is_custom_context */);
}
1591 :
1592 :
/* *INDENT-OFF* */
/* Registration for the plain-path handoff node. */
VLIB_REGISTER_NODE (ip4_sv_reass_handoff_node) = {
  .name = "ip4-sv-reassembly-handoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
  .error_strings = ip4_sv_reass_handoff_error_strings,
  .format_trace = format_ip4_sv_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1608 :
/**
 * @brief Handoff node for the custom-context path (carries aux data).
 */
VLIB_NODE_FN (ip4_sv_reass_custom_context_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip4_sv_reass_handoff_node_inline (
    vm, node, frame, false /* is_feature */, true /* is_custom_context */);
}
1615 :
/* Registration for the custom-context handoff node; aux_size carries the
 * per-buffer u32 caller context across the thread boundary. */
VLIB_REGISTER_NODE (ip4_sv_reass_custom_context_handoff_node) = {
  .name = "ip4-sv-reassembly-custom-context-handoff",
  .vector_size = sizeof (u32),
  .aux_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
  .error_strings = ip4_sv_reass_handoff_error_strings,
  .format_trace = format_ip4_sv_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
1630 :
/* *INDENT-OFF* */
/**
 * @brief Handoff node for the feature-arc path.
 */
VLIB_NODE_FN (ip4_sv_reass_feature_handoff_node) (vlib_main_t * vm,
						  vlib_node_runtime_t *
						  node,
						  vlib_frame_t * frame)
{
  return ip4_sv_reass_handoff_node_inline (
    vm, node, frame, true /* is_feature */, false /* is_custom_context */);
}
/* *INDENT-ON* */
1641 :
1642 :
/* *INDENT-OFF* */
/* Registration for the feature-arc handoff node. */
VLIB_REGISTER_NODE (ip4_sv_reass_feature_handoff_node) = {
  .name = "ip4-sv-reass-feature-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
  .error_strings = ip4_sv_reass_handoff_error_strings,
  .format_trace = format_ip4_sv_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1658 :
1659 : #ifndef CLIB_MARCH_VARIANT
/**
 * @brief Reference-counted enable/disable of the input-side feature.
 *
 * Keeps a per-interface refcount so multiple users can request the feature
 * independently; the vnet feature arc is only touched on the 0 -> 1 and
 * 1 -> 0 transitions.
 *
 * @return 0, or the vnet_feature_enable_disable() result on a transition.
 */
int
ip4_sv_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
{
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
  if (is_enable)
    {
      if (!rm->feature_use_refcount_per_intf[sw_if_index])
	{
	  /* NOTE(review): the refcount is bumped before the feature call; if
	   * vnet_feature_enable_disable() fails the count still reads 1 —
	   * confirm callers treat that as acceptable */
	  ++rm->feature_use_refcount_per_intf[sw_if_index];
	  return vnet_feature_enable_disable ("ip4-unicast",
					      "ip4-sv-reassembly-feature",
					      sw_if_index, 1, 0, 0);
	}
      ++rm->feature_use_refcount_per_intf[sw_if_index];
    }
  else
    {
      if (rm->feature_use_refcount_per_intf[sw_if_index])
	--rm->feature_use_refcount_per_intf[sw_if_index];
      if (!rm->feature_use_refcount_per_intf[sw_if_index])
	return vnet_feature_enable_disable ("ip4-unicast",
					    "ip4-sv-reassembly-feature",
					    sw_if_index, 0, 0, 0);
    }
  return 0;
}
1687 :
/**
 * @brief Register a caller-supplied next node for the custom-next variant.
 *
 * @return the next-index to store in buffers destined for that node.
 */
uword
ip4_sv_reass_custom_register_next_node (uword node_index)
{
  return vlib_node_add_next (vlib_get_main (), ip4_sv_reass_custom_node.index,
			     node_index);
}
1694 :
/**
 * @brief Same as above, for the custom-context node variant.
 */
uword
ip4_sv_reass_custom_context_register_next_node (uword node_index)
{
  return vlib_node_add_next (
    vlib_get_main (), ip4_sv_reass_custom_context_node.index, node_index);
}
1701 :
/**
 * @brief Reference-counted enable/disable of the output-side feature.
 *
 * Mirrors ip4_sv_reass_enable_disable_with_refcnt() for the ip4-output arc:
 * the feature is only enabled/disabled on 0 -> 1 and 1 -> 0 transitions.
 *
 * @return 0, or the vnet_feature_enable_disable() result on a transition.
 */
int
ip4_sv_reass_output_enable_disable_with_refcnt (u32 sw_if_index,
						int is_enable)
{
  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
  vec_validate (rm->output_feature_use_refcount_per_intf, sw_if_index);
  if (is_enable)
    {
      if (!rm->output_feature_use_refcount_per_intf[sw_if_index])
	{
	  /* NOTE(review): refcount bumped before the feature call — same
	   * failure-path caveat as the input-side helper above */
	  ++rm->output_feature_use_refcount_per_intf[sw_if_index];
	  return vnet_feature_enable_disable ("ip4-output",
					      "ip4-sv-reassembly-output-feature",
					      sw_if_index, 1, 0, 0);
	}
      ++rm->output_feature_use_refcount_per_intf[sw_if_index];
    }
  else
    {
      if (rm->output_feature_use_refcount_per_intf[sw_if_index])
	--rm->output_feature_use_refcount_per_intf[sw_if_index];
      if (!rm->output_feature_use_refcount_per_intf[sw_if_index])
	return vnet_feature_enable_disable ("ip4-output",
					    "ip4-sv-reassembly-output-feature",
					    sw_if_index, 0, 0, 0);
    }
  return 0;
}
1730 : #endif
1731 :
1732 : /*
1733 : * fd.io coding-style-patch-verification: ON
1734 : *
1735 : * Local Variables:
1736 : * eval: (c-set-style "gnu")
1737 : * End:
1738 : */
|