Line data Source code
1 : /*
2 : * Copyright (c) 2017 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 : /*
16 : * ioam_cache_tunnel_select_node.c
17 : * This file implements anycast server selection using ioam data
18 : * attached to anycast service selection.
19 : * Anycast service is reachable via multiple servers reachable
20 : * over SR tunnels.
21 : * Works with TCP Anycast application.
22 : * Cache entry is created when TCP SYN is received for anycast destination.
23 : * Response TCP SYN ACKs for anycast service is compared and selected
24 : * response is forwarded.
25 : * The functionality is introduced via graph nodes that are hooked into
26 : * vnet graph via classifier configs like below:
27 : *
28 : * Enable anycast service selection:
29 : * set ioam ip6 sr-tunnel-select oneway
30 : *
31 : * Enable following classifier on the anycast service client facing interface
32 : * e.g. anycast service is db06::06 then:
33 : * classify session acl-hit-next ip6-node ip6-add-syn-hop-by-hop table-index 0 match l3
34 : * ip6 dst db06::06 ioam-encap anycast
35 : *
36 : * Enable following classifier on the interfaces facing the server of anycast service:
37 : * classify session acl-hit-next ip6-node ip6-lookup table-index 0 match l3
38 : * ip6 src db06::06 ioam-decap anycast
39 : *
40 : */
41 : #include <vlib/vlib.h>
42 : #include <vnet/vnet.h>
43 : #include <vppinfra/error.h>
44 : #include <vnet/ip/ip.h>
45 : #include <vnet/srv6/sr.h>
46 : #include <ioam/ip6/ioam_cache.h>
47 : #include <vnet/ip/ip6_hop_by_hop.h>
48 : #include <vnet/ip/ip6_hop_by_hop_packet.h>
49 : #include <vnet/ip/ip6_inlines.h>
50 :
/* Per-packet trace record captured by the ip6-ioam-tunnel-select node. */
typedef struct
{
  u32 next_index;		/* next-node index chosen for this packet */
  u32 flow_label;		/* ip6 version/tc/flow-label word (host byte order) */
} cache_ts_trace_t;
56 :
57 : /* packet trace format function */
58 : static u8 *
59 0 : format_cache_ts_trace (u8 * s, va_list * args)
60 : {
61 0 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
62 0 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
63 0 : cache_ts_trace_t *t = va_arg (*args, cache_ts_trace_t *);
64 :
65 0 : s = format (s, "CACHE: flow_label %d, next index %d",
66 : t->flow_label, t->next_index);
67 0 : return s;
68 : }
69 :
/* Node error/counter list: one counter, bumped when an iOAM header is cached. */
#define foreach_cache_ts_error \
_(RECORDED, "ip6 iOAM headers cached")

typedef enum
{
#define _(sym,str) CACHE_TS_ERROR_##sym,
  foreach_cache_ts_error
#undef _
    CACHE_TS_N_ERROR,
} cache_ts_error_t;

/* Counter display strings, generated from the same error list. */
static char *cache_ts_error_strings[] = {
#define _(sym,string) string,
  foreach_cache_ts_error
#undef _
};

/* Next-node dispositions for ip6-ioam-tunnel-select. */
typedef enum
{
  IOAM_CACHE_TS_NEXT_POP_HBYH,	/* forward: pop the hop-by-hop header */
  IOAM_CACHE_TS_ERROR_NEXT_DROP,	/* drop: response not selected */
  IOAM_CACHE_TS_N_NEXT,
} cache_ts_next_t;
93 :
94 : static uword
95 0 : ip6_ioam_cache_ts_node_fn (vlib_main_t * vm,
96 : vlib_node_runtime_t * node, vlib_frame_t * frame)
97 : {
98 0 : ioam_cache_main_t *cm = &ioam_cache_main;
99 : u32 n_left_from, *from, *to_next;
100 : cache_ts_next_t next_index;
101 0 : u32 recorded = 0;
102 :
103 0 : from = vlib_frame_vector_args (frame);
104 0 : n_left_from = frame->n_vectors;
105 0 : next_index = node->cached_next_index;
106 :
107 0 : while (n_left_from > 0)
108 : {
109 : u32 n_left_to_next;
110 :
111 0 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
112 : // TODO: dual loop
113 0 : while (n_left_from > 0 && n_left_to_next > 0)
114 : {
115 : u32 bi0;
116 : vlib_buffer_t *p0;
117 0 : u32 next0 = IOAM_CACHE_TS_NEXT_POP_HBYH;
118 : ip6_header_t *ip0;
119 : ip6_hop_by_hop_header_t *hbh0, *hbh_cmp;
120 : tcp_header_t *tcp0;
121 : u32 tcp_offset0;
122 0 : u32 cache_ts_index = 0;
123 0 : u8 cache_thread_id = 0;
124 0 : int result = 0;
125 0 : int skip = 0;
126 :
127 0 : bi0 = from[0];
128 0 : from += 1;
129 0 : n_left_from -= 1;
130 :
131 0 : p0 = vlib_get_buffer (vm, bi0);
132 0 : ip0 = vlib_buffer_get_current (p0);
133 0 : if (IP_PROTOCOL_TCP ==
134 0 : ip6_locate_header (p0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
135 : {
136 0 : tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
137 0 : if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
138 0 : (tcp0->flags & TCP_FLAG_ACK) == TCP_FLAG_ACK)
139 : {
140 : /* Look up and compare */
141 0 : hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
142 :
143 0 : if (0 == ioam_cache_ts_lookup (ip0,
144 0 : hbh0->protocol,
145 0 : clib_net_to_host_u16
146 0 : (tcp0->src_port),
147 0 : clib_net_to_host_u16
148 0 : (tcp0->dst_port),
149 : clib_net_to_host_u32
150 : (tcp0->ack_number), &hbh_cmp,
151 : &cache_ts_index,
152 : &cache_thread_id, 1))
153 : {
154 : /* response seen */
155 0 : result = -1;
156 0 : if (hbh_cmp)
157 : result =
158 0 : ip6_ioam_analyse_compare_path_delay (hbh0, hbh_cmp,
159 0 : cm->criteria_oneway);
160 0 : if (result >= 0)
161 : {
162 : /* current syn/ack is worse than the earlier: Drop */
163 0 : next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
164 : /* Check if all responses are received or time has exceeded
165 : send cached response if yes */
166 0 : ioam_cache_ts_check_and_send (cache_thread_id,
167 : cache_ts_index);
168 : }
169 : else
170 : {
171 : /* Update cache with this buffer */
172 : /* If successfully updated then skip sending it */
173 0 : if (0 ==
174 : (result =
175 0 : ioam_cache_ts_update (cache_thread_id,
176 : cache_ts_index, bi0,
177 : hbh0)))
178 : {
179 0 : skip = 1;
180 : }
181 : else
182 0 : next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
183 : }
184 : }
185 : else
186 : {
187 0 : next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
188 : }
189 : }
190 0 : else if ((tcp0->flags & TCP_FLAG_RST) == TCP_FLAG_RST)
191 : {
192 : /* Look up and compare */
193 0 : hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
194 0 : if (0 == ioam_cache_ts_lookup (ip0, hbh0->protocol, clib_net_to_host_u16 (tcp0->src_port), clib_net_to_host_u16 (tcp0->dst_port), clib_net_to_host_u32 (tcp0->ack_number), &hbh_cmp, &cache_ts_index, &cache_thread_id, 1)) //response seen
195 : {
196 0 : next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
197 0 : if (hbh_cmp)
198 0 : ioam_cache_ts_check_and_send (cache_thread_id,
199 : cache_ts_index);
200 : }
201 :
202 : }
203 : }
204 0 : if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
205 : {
206 0 : if (p0->flags & VLIB_BUFFER_IS_TRACED)
207 : {
208 : cache_ts_trace_t *t =
209 0 : vlib_add_trace (vm, node, p0, sizeof (*t));
210 0 : t->flow_label =
211 0 : clib_net_to_host_u32
212 : (ip0->ip_version_traffic_class_and_flow_label);
213 0 : t->next_index = next0;
214 : }
215 : }
216 : /* verify speculative enqueue, maybe switch current next frame */
217 0 : if (!skip)
218 : {
219 0 : to_next[0] = bi0;
220 0 : to_next += 1;
221 0 : n_left_to_next -= 1;
222 0 : vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
223 : to_next, n_left_to_next,
224 : bi0, next0);
225 : }
226 : }
227 :
228 0 : vlib_put_next_frame (vm, node, next_index, n_left_to_next);
229 : }
230 0 : vlib_node_increment_counter (vm, ioam_cache_ts_node.index,
231 : CACHE_TS_ERROR_RECORDED, recorded);
232 0 : return frame->n_vectors;
233 : }
234 :
/*
 * Node for IP6 iOAM header cache
 */
/* Internal node: fed by the ioam-decap classifier on server-facing
 * interfaces; selected responses continue to ip6-pop-hop-by-hop. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ioam_cache_ts_node) =
{
  .function = ip6_ioam_cache_ts_node_fn,
  .name = "ip6-ioam-tunnel-select",
  .vector_size = sizeof (u32),
  .format_trace = format_cache_ts_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (cache_ts_error_strings),
  .error_strings = cache_ts_error_strings,
  .n_next_nodes = IOAM_CACHE_TS_N_NEXT,
  /* edit / add dispositions here */
  .next_nodes =
  {
    [IOAM_CACHE_TS_NEXT_POP_HBYH] = "ip6-pop-hop-by-hop",
    [IOAM_CACHE_TS_ERROR_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
257 :
/* Per-packet trace record for the ip6-add-syn-hop-by-hop node. */
typedef struct
{
  u32 next_index;		/* next-node index chosen for this packet */
} ip6_reset_ts_hbh_trace_t;
262 :
263 : /* packet trace format function */
264 : static u8 *
265 0 : format_ip6_reset_ts_hbh_trace (u8 * s, va_list * args)
266 : {
267 0 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
268 0 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
269 0 : ip6_reset_ts_hbh_trace_t *t = va_arg (*args,
270 : ip6_reset_ts_hbh_trace_t *);
271 :
272 : s =
273 0 : format (s, "IP6_IOAM_RESET_TUNNEL_SELECT_HBH: next index %d",
274 : t->next_index);
275 0 : return s;
276 : }
277 :
/* Counters for the ip6-add-syn-hop-by-hop node. */
#define foreach_ip6_reset_ts_hbh_error \
_(PROCESSED, "iOAM Syn/Ack Pkts processed") \
_(SAVED, "iOAM Syn Pkts state saved") \
_(REMOVED, "iOAM Syn/Ack Pkts state removed")

typedef enum
{
#define _(sym,str) IP6_RESET_TS_HBH_ERROR_##sym,
  foreach_ip6_reset_ts_hbh_error
#undef _
    IP6_RESET_TS_HBH_N_ERROR,
} ip6_reset_ts_hbh_error_t;

/* Counter display strings, generated from the same error list. */
static char *ip6_reset_ts_hbh_error_strings[] = {
#define _(sym,string) string,
  foreach_ip6_reset_ts_hbh_error
#undef _
};

/* Next-node dispositions: normal forwarding or drop. */
#define foreach_ip6_ioam_cache_ts_input_next    \
  _(IP6_LOOKUP, "ip6-lookup")                   \
  _(DROP, "error-drop")

typedef enum
{
#define _(s,n) IP6_IOAM_CACHE_TS_INPUT_NEXT_##s,
  foreach_ip6_ioam_cache_ts_input_next
#undef _
    IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
} ip6_ioam_cache_ts_input_next_t;
308 :
309 :
310 2300 : VLIB_NODE_FN (ip6_reset_ts_hbh_node) (vlib_main_t * vm,
311 : vlib_node_runtime_t * node,
312 : vlib_frame_t * frame)
313 : {
314 0 : ioam_cache_main_t *cm = &ioam_cache_main;
315 : u32 n_left_from, *from, *to_next;
316 : ip_lookup_next_t next_index;
317 0 : u32 processed = 0, cache_ts_added = 0;
318 : u64 now;
319 0 : u8 *rewrite = cm->rewrite;
320 0 : u32 rewrite_length = vec_len (rewrite);
321 0 : ioam_e2e_cache_option_t *e2e = 0;
322 0 : u8 no_of_responses = cm->wait_for_responses;
323 :
324 0 : from = vlib_frame_vector_args (frame);
325 0 : n_left_from = frame->n_vectors;
326 0 : next_index = node->cached_next_index;
327 :
328 0 : while (n_left_from > 0)
329 : {
330 : u32 n_left_to_next;
331 :
332 0 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
333 :
334 0 : now = vlib_time_now (vm);
335 0 : while (n_left_from >= 4 && n_left_to_next >= 2)
336 : {
337 : u32 bi0, bi1;
338 : vlib_buffer_t *b0, *b1;
339 : u32 next0, next1;
340 : ip6_header_t *ip0, *ip1;
341 : tcp_header_t *tcp0, *tcp1;
342 : u32 tcp_offset0, tcp_offset1;
343 : ip6_hop_by_hop_header_t *hbh0, *hbh1;
344 : u64 *copy_src0, *copy_dst0, *copy_src1, *copy_dst1;
345 : u16 new_l0, new_l1;
346 0 : u32 pool_index0 = 0, pool_index1 = 0;
347 :
348 0 : next0 = next1 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
349 : /* Prefetch next iteration. */
350 : {
351 : vlib_buffer_t *p2, *p3;
352 :
353 0 : p2 = vlib_get_buffer (vm, from[2]);
354 0 : p3 = vlib_get_buffer (vm, from[3]);
355 :
356 0 : vlib_prefetch_buffer_header (p2, LOAD);
357 0 : vlib_prefetch_buffer_header (p3, LOAD);
358 0 : CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
359 0 : CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
360 : }
361 :
362 :
363 : /* speculatively enqueue b0 to the current next frame */
364 0 : to_next[0] = bi0 = from[0];
365 0 : to_next[1] = bi1 = from[1];
366 0 : from += 2;
367 0 : to_next += 2;
368 0 : n_left_from -= 2;
369 0 : n_left_to_next -= 2;
370 :
371 0 : b0 = vlib_get_buffer (vm, bi0);
372 0 : b1 = vlib_get_buffer (vm, bi1);
373 :
374 0 : ip0 = vlib_buffer_get_current (b0);
375 0 : ip1 = vlib_buffer_get_current (b1);
376 :
377 0 : if (IP_PROTOCOL_TCP !=
378 0 : ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
379 : {
380 0 : goto NEXT00;
381 : }
382 0 : tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
383 0 : if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
384 0 : (tcp0->flags & TCP_FLAG_ACK) == 0)
385 : {
386 0 : if (no_of_responses > 0)
387 : {
388 : /* Create TS select entry */
389 0 : if (0 == ioam_cache_ts_add (ip0,
390 0 : clib_net_to_host_u16
391 0 : (tcp0->src_port),
392 0 : clib_net_to_host_u16
393 0 : (tcp0->dst_port),
394 0 : clib_net_to_host_u32
395 : (tcp0->seq_number) + 1,
396 : no_of_responses, now,
397 : vm->thread_index, &pool_index0))
398 : {
399 0 : cache_ts_added++;
400 : }
401 : }
402 0 : copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
403 0 : copy_src0 = (u64 *) ip0;
404 :
405 0 : copy_dst0[0] = copy_src0[0];
406 0 : copy_dst0[1] = copy_src0[1];
407 0 : copy_dst0[2] = copy_src0[2];
408 0 : copy_dst0[3] = copy_src0[3];
409 0 : copy_dst0[4] = copy_src0[4];
410 :
411 0 : vlib_buffer_advance (b0, -(word) rewrite_length);
412 0 : ip0 = vlib_buffer_get_current (b0);
413 :
414 0 : hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
415 : /* $$$ tune, rewrite_length is a multiple of 8 */
416 0 : clib_memcpy_fast (hbh0, rewrite, rewrite_length);
417 0 : e2e =
418 : (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
419 0 : cm->rewrite_pool_index_offset);
420 0 : e2e->pool_id = (u8) vm->thread_index;
421 0 : e2e->pool_index = pool_index0;
422 0 : ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
423 : ((u8 *) e2e +
424 : sizeof (ioam_e2e_cache_option_t)),
425 : &cm->sr_localsid_ts);
426 : /* Patch the protocol chain, insert the h-b-h (type 0) header */
427 0 : hbh0->protocol = ip0->protocol;
428 0 : ip0->protocol = 0;
429 0 : new_l0 =
430 0 : clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
431 0 : ip0->payload_length = clib_host_to_net_u16 (new_l0);
432 0 : processed++;
433 : }
434 :
435 0 : NEXT00:
436 0 : if (IP_PROTOCOL_TCP !=
437 0 : ip6_locate_header (b1, ip1, IP_PROTOCOL_TCP, &tcp_offset1))
438 : {
439 0 : goto TRACE00;
440 : }
441 0 : tcp1 = (tcp_header_t *) ((u8 *) ip1 + tcp_offset1);
442 0 : if ((tcp1->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
443 0 : (tcp1->flags & TCP_FLAG_ACK) == 0)
444 : {
445 0 : if (no_of_responses > 0)
446 : {
447 : /* Create TS select entry */
448 0 : if (0 == ioam_cache_ts_add (ip1,
449 0 : clib_net_to_host_u16
450 0 : (tcp1->src_port),
451 0 : clib_net_to_host_u16
452 0 : (tcp1->dst_port),
453 0 : clib_net_to_host_u32
454 : (tcp1->seq_number) + 1,
455 : no_of_responses, now,
456 : vm->thread_index, &pool_index1))
457 : {
458 0 : cache_ts_added++;
459 : }
460 : }
461 :
462 0 : copy_dst1 = (u64 *) (((u8 *) ip1) - rewrite_length);
463 0 : copy_src1 = (u64 *) ip1;
464 :
465 0 : copy_dst1[0] = copy_src1[0];
466 0 : copy_dst1[1] = copy_src1[1];
467 0 : copy_dst1[2] = copy_src1[2];
468 0 : copy_dst1[3] = copy_src1[3];
469 0 : copy_dst1[4] = copy_src1[4];
470 :
471 0 : vlib_buffer_advance (b1, -(word) rewrite_length);
472 0 : ip1 = vlib_buffer_get_current (b1);
473 :
474 0 : hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
475 : /* $$$ tune, rewrite_length is a multiple of 8 */
476 0 : clib_memcpy_fast (hbh1, rewrite, rewrite_length);
477 0 : e2e =
478 : (ioam_e2e_cache_option_t *) ((u8 *) hbh1 +
479 0 : cm->rewrite_pool_index_offset);
480 0 : e2e->pool_id = (u8) vm->thread_index;
481 0 : e2e->pool_index = pool_index1;
482 0 : ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
483 : ((u8 *) e2e +
484 : sizeof (ioam_e2e_cache_option_t)),
485 : &cm->sr_localsid_ts);
486 : /* Patch the protocol chain, insert the h-b-h (type 0) header */
487 0 : hbh1->protocol = ip1->protocol;
488 0 : ip1->protocol = 0;
489 0 : new_l1 =
490 0 : clib_net_to_host_u16 (ip1->payload_length) + rewrite_length;
491 0 : ip1->payload_length = clib_host_to_net_u16 (new_l1);
492 0 : processed++;
493 : }
494 :
495 0 : TRACE00:
496 0 : if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
497 : {
498 0 : if (b0->flags & VLIB_BUFFER_IS_TRACED)
499 : {
500 : ip6_reset_ts_hbh_trace_t *t =
501 0 : vlib_add_trace (vm, node, b0, sizeof (*t));
502 0 : t->next_index = next0;
503 : }
504 0 : if (b1->flags & VLIB_BUFFER_IS_TRACED)
505 : {
506 : ip6_reset_ts_hbh_trace_t *t =
507 0 : vlib_add_trace (vm, node, b1, sizeof (*t));
508 0 : t->next_index = next1;
509 : }
510 :
511 : }
512 :
513 : /* verify speculative enqueue, maybe switch current next frame */
514 0 : vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
515 : to_next, n_left_to_next,
516 : bi0, bi1, next0, next1);
517 : }
518 0 : while (n_left_from > 0 && n_left_to_next > 0)
519 : {
520 : u32 bi0;
521 : vlib_buffer_t *b0;
522 : u32 next0;
523 : ip6_header_t *ip0;
524 : tcp_header_t *tcp0;
525 : u32 tcp_offset0;
526 : ip6_hop_by_hop_header_t *hbh0;
527 : u64 *copy_src0, *copy_dst0;
528 : u16 new_l0;
529 0 : u32 pool_index0 = 0;
530 :
531 0 : next0 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
532 : /* speculatively enqueue b0 to the current next frame */
533 0 : bi0 = from[0];
534 0 : to_next[0] = bi0;
535 0 : from += 1;
536 0 : to_next += 1;
537 0 : n_left_from -= 1;
538 0 : n_left_to_next -= 1;
539 :
540 0 : b0 = vlib_get_buffer (vm, bi0);
541 :
542 0 : ip0 = vlib_buffer_get_current (b0);
543 0 : if (IP_PROTOCOL_TCP !=
544 0 : ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
545 : {
546 0 : goto TRACE0;
547 : }
548 0 : tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
549 0 : if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
550 0 : (tcp0->flags & TCP_FLAG_ACK) == 0)
551 : {
552 0 : if (no_of_responses > 0)
553 : {
554 : /* Create TS select entry */
555 0 : if (0 == ioam_cache_ts_add (ip0,
556 0 : clib_net_to_host_u16
557 0 : (tcp0->src_port),
558 0 : clib_net_to_host_u16
559 0 : (tcp0->dst_port),
560 0 : clib_net_to_host_u32
561 : (tcp0->seq_number) + 1,
562 : no_of_responses, now,
563 : vm->thread_index, &pool_index0))
564 : {
565 0 : cache_ts_added++;
566 : }
567 : }
568 0 : copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
569 0 : copy_src0 = (u64 *) ip0;
570 :
571 0 : copy_dst0[0] = copy_src0[0];
572 0 : copy_dst0[1] = copy_src0[1];
573 0 : copy_dst0[2] = copy_src0[2];
574 0 : copy_dst0[3] = copy_src0[3];
575 0 : copy_dst0[4] = copy_src0[4];
576 :
577 0 : vlib_buffer_advance (b0, -(word) rewrite_length);
578 0 : ip0 = vlib_buffer_get_current (b0);
579 :
580 0 : hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
581 : /* $$$ tune, rewrite_length is a multiple of 8 */
582 0 : clib_memcpy_fast (hbh0, rewrite, rewrite_length);
583 0 : e2e =
584 : (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
585 0 : cm->rewrite_pool_index_offset);
586 0 : e2e->pool_id = (u8) vm->thread_index;
587 0 : e2e->pool_index = pool_index0;
588 0 : ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
589 : ((u8 *) e2e +
590 : sizeof (ioam_e2e_cache_option_t)),
591 : &cm->sr_localsid_ts);
592 : /* Patch the protocol chain, insert the h-b-h (type 0) header */
593 0 : hbh0->protocol = ip0->protocol;
594 0 : ip0->protocol = 0;
595 0 : new_l0 =
596 0 : clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
597 0 : ip0->payload_length = clib_host_to_net_u16 (new_l0);
598 0 : processed++;
599 : }
600 0 : TRACE0:
601 0 : if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
602 : && (b0->flags & VLIB_BUFFER_IS_TRACED)))
603 : {
604 : ip6_reset_ts_hbh_trace_t *t =
605 0 : vlib_add_trace (vm, node, b0, sizeof (*t));
606 0 : t->next_index = next0;
607 : }
608 :
609 : /* verify speculative enqueue, maybe switch current next frame */
610 0 : vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
611 : to_next, n_left_to_next,
612 : bi0, next0);
613 : }
614 :
615 0 : vlib_put_next_frame (vm, node, next_index, n_left_to_next);
616 : }
617 :
618 0 : vlib_node_increment_counter (vm, cm->ip6_reset_ts_hbh_node_index,
619 : IP6_RESET_TS_HBH_ERROR_PROCESSED, processed);
620 0 : vlib_node_increment_counter (vm, cm->ip6_reset_ts_hbh_node_index,
621 : IP6_RESET_TS_HBH_ERROR_SAVED, cache_ts_added);
622 :
623 0 : return frame->n_vectors;
624 : }
625 :
/* Internal node: fed by the ioam-encap classifier on client-facing
 * interfaces; packets continue to ip6-lookup after the h-b-h insert. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_reset_ts_hbh_node) =
{
  .name = "ip6-add-syn-hop-by-hop",
  .vector_size = sizeof (u32),
  .format_trace = format_ip6_reset_ts_hbh_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (ip6_reset_ts_hbh_error_strings),
  .error_strings = ip6_reset_ts_hbh_error_strings,
  /* See ip/lookup.h */
  .n_next_nodes = IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [IP6_IOAM_CACHE_TS_INPUT_NEXT_##s] = n,
    foreach_ip6_ioam_cache_ts_input_next
#undef _
  },
};

/* *INDENT-ON* */
646 :
#ifndef CLIB_MARCH_VARIANT
/* Registration handle for the timer-tick input node defined below;
 * declared once, outside march-variant builds. */
vlib_node_registration_t ioam_cache_ts_timer_tick_node;
#endif /* CLIB_MARCH_VARIANT */
650 :
/* Trace record for the timer-tick node. */
typedef struct
{
  u32 thread_index;		/* thread whose timer wheel ticked */
} ioam_cache_ts_timer_tick_trace_t;
655 :
656 : /* packet trace format function */
657 : static u8 *
658 0 : format_ioam_cache_ts_timer_tick_trace (u8 * s, va_list * args)
659 : {
660 0 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
661 0 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
662 0 : ioam_cache_ts_timer_tick_trace_t *t =
663 : va_arg (*args, ioam_cache_ts_timer_tick_trace_t *);
664 :
665 0 : s = format (s, "IOAM_CACHE_TS_TIMER_TICK: thread index %d",
666 : t->thread_index);
667 0 : return s;
668 : }
669 :
/* Counters for the timer-tick node: number of expired-timer events. */
#define foreach_ioam_cache_ts_timer_tick_error                 \
  _(TIMER, "Timer events")

typedef enum
{
#define _(sym,str) IOAM_CACHE_TS_TIMER_TICK_ERROR_##sym,
  foreach_ioam_cache_ts_timer_tick_error
#undef _
    IOAM_CACHE_TS_TIMER_TICK_N_ERROR,
} ioam_cache_ts_timer_tick_error_t;

/* Counter display strings, generated from the same error list. */
static char *ioam_cache_ts_timer_tick_error_strings[] = {
#define _(sym,string) string,
  foreach_ioam_cache_ts_timer_tick_error
#undef _
};
686 :
687 : #ifndef CLIB_MARCH_VARIANT
688 : void
689 0 : ioam_cache_ts_timer_node_enable (vlib_main_t * vm, u8 enable)
690 : {
691 0 : vlib_node_set_state (vm, ioam_cache_ts_timer_tick_node.index,
692 : enable ==
693 : 0 ? VLIB_NODE_STATE_DISABLED :
694 : VLIB_NODE_STATE_POLLING);
695 0 : }
696 :
697 : void
698 0 : expired_cache_ts_timer_callback (u32 * expired_timers)
699 : {
700 0 : ioam_cache_main_t *cm = &ioam_cache_main;
701 : int i;
702 : u32 pool_index;
703 0 : u32 thread_index = vlib_get_thread_index ();
704 0 : u32 count = 0;
705 :
706 0 : for (i = 0; i < vec_len (expired_timers); i++)
707 : {
708 : /* Get pool index and pool id */
709 0 : pool_index = expired_timers[i] & 0x0FFFFFFF;
710 :
711 : /* Handle expiration */
712 0 : ioam_cache_ts_send (thread_index, pool_index);
713 0 : count++;
714 : }
715 0 : vlib_node_increment_counter (cm->vlib_main,
716 : ioam_cache_ts_timer_tick_node.index,
717 : IOAM_CACHE_TS_TIMER_TICK_ERROR_TIMER, count);
718 0 : }
719 : #endif /* CLIB_MARCH_VARIANT */
720 :
721 : static uword
722 0 : ioam_cache_ts_timer_tick_node_fn (vlib_main_t * vm,
723 : vlib_node_runtime_t * node,
724 : vlib_frame_t * f)
725 : {
726 0 : ioam_cache_main_t *cm = &ioam_cache_main;
727 0 : u32 my_thread_index = vlib_get_thread_index ();
728 : struct timespec ts, tsrem;
729 :
730 0 : tw_timer_expire_timers_16t_2w_512sl (&cm->timer_wheels[my_thread_index],
731 : vlib_time_now (vm));
732 0 : ts.tv_sec = 0;
733 0 : ts.tv_nsec = 1000 * 1000 * IOAM_CACHE_TS_TICK;
734 0 : while (nanosleep (&ts, &tsrem) < 0)
735 : {
736 0 : ts = tsrem;
737 : }
738 :
739 0 : return 0;
740 : }
/* Input node driving the tunnel-select cache timer wheel; starts
 * DISABLED and is toggled by ioam_cache_ts_timer_node_enable(). */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ioam_cache_ts_timer_tick_node) = {
  .function = ioam_cache_ts_timer_tick_node_fn,
  .name = "ioam-cache-ts-timer-tick",
  .format_trace = format_ioam_cache_ts_timer_tick_trace,
  .type = VLIB_NODE_TYPE_INPUT,

  .n_errors = ARRAY_LEN(ioam_cache_ts_timer_tick_error_strings),
  .error_strings = ioam_cache_ts_timer_tick_error_strings,

  .n_next_nodes = 1,

  .state = VLIB_NODE_STATE_DISABLED,

  /* edit / add dispositions here */
  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
761 :
762 : /*
763 : * fd.io coding-style-patch-verification: ON
764 : *
765 : * Local Variables:
766 : * eval: (c-set-style "gnu")
767 : * End:
768 : */
|