Line | Data | Source code
1 : /*
2 : * node.c - ipfix probe graph node
3 : *
4 : * Copyright (c) 2017 Cisco and/or its affiliates.
5 : * Licensed under the Apache License, Version 2.0 (the "License");
6 : * you may not use this file except in compliance with the License.
7 : * You may obtain a copy of the License at:
8 : *
9 : * http://www.apache.org/licenses/LICENSE-2.0
10 : *
11 : * Unless required by applicable law or agreed to in writing, software
12 : * distributed under the License is distributed on an "AS IS" BASIS,
13 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 : * See the License for the specific language governing permissions and
15 : * limitations under the License.
16 : */
17 : #include <vlib/vlib.h>
18 : #include <vnet/vnet.h>
19 : #include <vppinfra/crc32.h>
20 : #include <vppinfra/xxhash.h>
21 : #include <vppinfra/error.h>
22 : #include <flowprobe/flowprobe.h>
23 : #include <vnet/ip/ip6_packet.h>
24 : #include <vnet/udp/udp_local.h>
25 : #include <vlibmemory/api.h>
26 :
27 : static void flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e);
28 :
29 : /**
30 : * @file node.c
31 : * flow record generator graph node
32 : */
33 :
34 : typedef struct
35 : {
36 : /** interface handle */
37 : u32 rx_sw_if_index;
38 : u32 tx_sw_if_index;
39 : /** packet timestamp */
40 : u64 timestamp;
41 : /** size of the buffer */
42 : u16 buffer_size;
43 :
44 : /** L2 information */
45 : u8 src_mac[6];
46 : u8 dst_mac[6];
47 : /** Ethertype */
48 : u16 ethertype;
49 :
50 : /** L3 information */
51 : ip46_address_t src_address;
52 : ip46_address_t dst_address;
53 : u8 protocol;
54 : u8 tos;
55 :
56 : /** L4 information */
57 : u16 src_port;
58 : u16 dst_port;
59 :
60 : flowprobe_variant_t which;
61 : } flowprobe_trace_t;
62 :
63 : static char *flowprobe_variant_strings[] = {
64 : [FLOW_VARIANT_IP4] = "IP4",
65 : [FLOW_VARIANT_IP6] = "IP6",
66 : [FLOW_VARIANT_L2] = "L2",
67 : [FLOW_VARIANT_L2_IP4] = "L2-IP4",
68 : [FLOW_VARIANT_L2_IP6] = "L2-IP6",
69 : };
70 :
71 : /* packet trace format function */
72 : static u8 *
73 34 : format_flowprobe_trace (u8 * s, va_list * args)
74 : {
75 34 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
76 34 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
77 34 : flowprobe_trace_t *t = va_arg (*args, flowprobe_trace_t *);
78 34 : u32 indent = format_get_indent (s);
79 :
80 34 : s = format (s,
81 : "FLOWPROBE[%s]: rx_sw_if_index %d, tx_sw_if_index %d, "
82 34 : "timestamp %lld, size %d", flowprobe_variant_strings[t->which],
83 : t->rx_sw_if_index, t->tx_sw_if_index,
84 34 : t->timestamp, t->buffer_size);
85 :
86 34 : if (t->which == FLOW_VARIANT_L2)
87 4 : s = format (s, "\n%U -> %U", format_white_space, indent,
88 : format_ethernet_address, &t->src_mac,
89 : format_ethernet_address, &t->dst_mac);
90 :
91 34 : if (t->protocol > 0
92 26 : && (t->which == FLOW_VARIANT_L2_IP4 || t->which == FLOW_VARIANT_IP4
93 4 : || t->which == FLOW_VARIANT_L2_IP6 || t->which == FLOW_VARIANT_IP6))
94 : s =
95 26 : format (s, "\n%U%U: %U -> %U", format_white_space, indent,
96 26 : format_ip_protocol, t->protocol, format_ip46_address,
97 : &t->src_address, IP46_TYPE_ANY, format_ip46_address,
98 : &t->dst_address, IP46_TYPE_ANY);
99 34 : return s;
100 : }
101 :
102 : vlib_node_registration_t flowprobe_input_ip4_node;
103 : vlib_node_registration_t flowprobe_input_ip6_node;
104 : vlib_node_registration_t flowprobe_input_l2_node;
105 : vlib_node_registration_t flowprobe_output_ip4_node;
106 : vlib_node_registration_t flowprobe_output_ip6_node;
107 : vlib_node_registration_t flowprobe_output_l2_node;
108 :
109 : /* Flowprobe node error counters */
110 : #define foreach_flowprobe_error \
111 : _(COLLISION, "Hash table collisions") \
112 : _(BUFFER, "Buffer allocation error") \
113 : _(EXPORTED_PACKETS, "Exported packets") \
114 : _(INPATH, "Exported packets in path")
115 :
116 : typedef enum
117 : {
118 : #define _(sym,str) FLOWPROBE_ERROR_##sym,
119 : foreach_flowprobe_error
120 : #undef _
121 : FLOWPROBE_N_ERROR,
122 : } flowprobe_error_t;
123 :
124 : static char *flowprobe_error_strings[] = {
125 : #define _(sym,string) string,
126 : foreach_flowprobe_error
127 : #undef _
128 : };
129 :
130 : typedef enum
131 : {
132 : FLOWPROBE_NEXT_DROP,
133 : FLOWPROBE_NEXT_IP4_LOOKUP,
134 : FLOWPROBE_N_NEXT,
135 : } flowprobe_next_t;
136 :
137 : #define FLOWPROBE_NEXT_NODES { \
138 : [FLOWPROBE_NEXT_DROP] = "error-drop", \
139 : [FLOWPROBE_NEXT_IP4_LOOKUP] = "ip4-lookup", \
140 : }
141 :
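 : /* Map the configured variant to the per-packet variant: an L2 path that
 : * also collects L3/L4 fields is re-classified by ethertype into
 : * FLOW_VARIANT_L2_IP4 or FLOW_VARIANT_L2_IP6 (non-IP frames stay
 : * FLOW_VARIANT_L2); any other variant is returned unchanged. */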
142 : static inline flowprobe_variant_t
143 58 : flowprobe_get_variant (flowprobe_variant_t which,
144 : flowprobe_record_t flags, u16 ethertype)
145 : {
146 58 : if (which == FLOW_VARIANT_L2
147 45 : && (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4))
148 43 : return ethertype == ETHERNET_TYPE_IP6 ? FLOW_VARIANT_L2_IP6 : ethertype ==
149 : ETHERNET_TYPE_IP4 ? FLOW_VARIANT_L2_IP4 : FLOW_VARIANT_L2;
150 15 : return which;
151 : }
152 :
153 : /*
154 : * NTP rfc868 : 2 208 988 800 corresponds to 00:00 1 Jan 1970 GMT
155 : */
156 : #define NTP_TIMESTAMP 2208988800LU
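 : /* 2 208 988 800 = (70 * 365 + 17) * 86400: seconds from 1 Jan 1900 (the
 : * NTP era) to 1 Jan 1970 (the Unix epoch), i.e. 70 years incl. 17 leap days */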
157 :
158 : static inline u32
159 57 : flowprobe_common_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
160 : {
161 57 : u16 start = offset;
162 :
163 : /* Ingress interface */
164 57 : u32 rx_if = clib_host_to_net_u32 (e->key.rx_sw_if_index);
165 57 : clib_memcpy_fast (to_b->data + offset, &rx_if, sizeof (rx_if));
166 57 : offset += sizeof (rx_if);
167 :
168 : /* Egress interface */
169 57 : u32 tx_if = clib_host_to_net_u32 (e->key.tx_sw_if_index);
170 57 : clib_memcpy_fast (to_b->data + offset, &tx_if, sizeof (tx_if));
171 57 : offset += sizeof (tx_if);
172 :
173 : /* Flow direction
174 : 0x00: ingress flow
175 : 0x01: egress flow */
176 57 : to_b->data[offset++] = (e->key.direction == FLOW_DIRECTION_TX);
177 :
178 : /* packet delta count */
179 57 : u64 packetdelta = clib_host_to_net_u64 (e->packetcount);
180 57 : clib_memcpy_fast (to_b->data + offset, &packetdelta, sizeof (u64));
181 57 : offset += sizeof (u64);
182 :
183 : /* flowStartNanoseconds */
184 57 : u32 t = clib_host_to_net_u32 (e->flow_start.sec + NTP_TIMESTAMP);
185 57 : clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
186 57 : offset += sizeof (u32);
187 57 : t = clib_host_to_net_u32 (e->flow_start.nsec);
188 57 : clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
189 57 : offset += sizeof (u32);
190 :
191 : /* flowEndNanoseconds */
192 57 : t = clib_host_to_net_u32 (e->flow_end.sec + NTP_TIMESTAMP);
193 57 : clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
194 57 : offset += sizeof (u32);
195 57 : t = clib_host_to_net_u32 (e->flow_end.nsec);
196 57 : clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
197 57 : offset += sizeof (u32);
198 :
199 57 : return offset - start;
200 : }
201 :
202 : static inline u32
203 45 : flowprobe_l2_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
204 : {
205 45 : u16 start = offset;
206 :
207 : /* src mac address */
208 45 : clib_memcpy_fast (to_b->data + offset, &e->key.src_mac, 6);
209 45 : offset += 6;
210 :
211 : /* dst mac address */
212 45 : clib_memcpy_fast (to_b->data + offset, &e->key.dst_mac, 6);
213 45 : offset += 6;
214 :
215 : /* ethertype */
216 45 : clib_memcpy_fast (to_b->data + offset, &e->key.ethertype, 2);
217 45 : offset += 2;
218 :
219 45 : return offset - start;
220 : }
221 :
222 : static inline u32
223 4 : flowprobe_l3_ip6_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
224 : {
225 4 : u16 start = offset;
226 :
227 : /* ip6 src address */
228 4 : clib_memcpy_fast (to_b->data + offset, &e->key.src_address,
229 : sizeof (ip6_address_t));
230 4 : offset += sizeof (ip6_address_t);
231 :
232 : /* ip6 dst address */
233 4 : clib_memcpy_fast (to_b->data + offset, &e->key.dst_address,
234 : sizeof (ip6_address_t));
235 4 : offset += sizeof (ip6_address_t);
236 :
237 : /* Protocol */
238 4 : to_b->data[offset++] = e->key.protocol;
239 :
240 : /* octetDeltaCount */
241 4 : u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
242 4 : clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
243 4 : offset += sizeof (u64);
244 :
245 4 : return offset - start;
246 : }
247 :
248 : static inline u32
249 39 : flowprobe_l3_ip4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
250 : {
251 39 : u16 start = offset;
252 :
253 : /* ip4 src address */
254 39 : clib_memcpy_fast (to_b->data + offset, &e->key.src_address.ip4,
255 : sizeof (ip4_address_t));
256 39 : offset += sizeof (ip4_address_t);
257 :
258 : /* ip4 dst address */
259 39 : clib_memcpy_fast (to_b->data + offset, &e->key.dst_address.ip4,
260 : sizeof (ip4_address_t));
261 39 : offset += sizeof (ip4_address_t);
262 :
263 : /* Protocol */
264 39 : to_b->data[offset++] = e->key.protocol;
265 :
266 : /* octetDeltaCount */
267 39 : u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
268 39 : clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
269 39 : offset += sizeof (u64);
270 :
271 39 : return offset - start;
272 : }
273 :
274 : static inline u32
275 43 : flowprobe_l4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
276 : {
277 43 : u16 start = offset;
278 :
279 : /* src port */
280 43 : clib_memcpy_fast (to_b->data + offset, &e->key.src_port, 2);
281 43 : offset += 2;
282 :
283 : /* dst port */
284 43 : clib_memcpy_fast (to_b->data + offset, &e->key.dst_port, 2);
285 43 : offset += 2;
286 :
287 : /* tcp control bits */
288 43 : u16 control_bits = htons (e->prot.tcp.flags);
289 43 : clib_memcpy_fast (to_b->data + offset, &control_bits, 2);
290 43 : offset += 2;
291 :
292 43 : return offset - start;
293 : }
294 :
295 : static inline u32
296 10 : flowprobe_hash (flowprobe_key_t * k)
297 : {
298 10 : flowprobe_main_t *fm = &flowprobe_main;
299 10 : u32 h = 0;
300 :
301 : #ifdef clib_crc32c_uses_intrinsics
302 10 : h = clib_crc32c ((u8 *) k, sizeof (*k));
303 : #else
304 : int i;
305 : u64 tmp = 0;
306 : for (i = 0; i < sizeof (*k) / 8; i++)
307 : tmp ^= ((u64 *) k)[i];
308 :
309 : h = clib_xxhash (tmp);
310 : #endif
311 :
312 10 : return h >> (32 - fm->ht_log2len);
313 : }
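 : /* Illustrative only: with ht_log2len == 10 the per-worker flow table has
 : * 1 << 10 == 1024 slots, and the 32-bit hash above maps to a slot by
 : * keeping its top 10 bits, i.e. slot = h >> (32 - 10). */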
314 :
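 : /* Look up a flow key in this worker's flow table. Returns the entry stored
 : * at the hashed slot (or NULL if the slot is empty); if the slot holds a
 : * different key, *collision is set and the caller flushes and reuses the
 : * entry. */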
315 : flowprobe_entry_t *
316 4 : flowprobe_lookup (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex,
317 : bool * collision)
318 : {
319 4 : flowprobe_main_t *fm = &flowprobe_main;
320 : flowprobe_entry_t *e;
321 : u32 h;
322 :
323 4 : h = (fm->active_timer) ? flowprobe_hash (k) : 0;
324 :
325 : /* Lookup in the flow state pool */
326 4 : *poolindex = fm->hash_per_worker[my_cpu_number][h];
327 4 : if (*poolindex != ~0)
328 : {
329 1 : e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], *poolindex);
330 1 : if (e)
331 : {
332 : /* Verify key or report collision */
333 1 : if (memcmp (k, &e->key, sizeof (flowprobe_key_t)))
334 0 : *collision = true;
335 1 : return e;
336 : }
337 : }
338 :
339 3 : return 0;
340 : }
341 :
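 : /* Allocate a new flow entry in this worker's pool, install it in the hash
 : * table, and start its passive timer when one is configured. */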
342 : flowprobe_entry_t *
343 3 : flowprobe_create (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex)
344 : {
345 3 : flowprobe_main_t *fm = &flowprobe_main;
346 : u32 h;
347 :
348 : flowprobe_entry_t *e;
349 :
350 : /* Get my index */
351 3 : h = (fm->active_timer) ? flowprobe_hash (k) : 0;
352 :
353 3 : pool_get (fm->pool_per_worker[my_cpu_number], e);
354 3 : *poolindex = e - fm->pool_per_worker[my_cpu_number];
355 3 : fm->hash_per_worker[my_cpu_number][h] = *poolindex;
356 :
357 3 : e->key = *k;
358 :
359 3 : if (fm->passive_timer > 0)
360 : {
361 3 : e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
362 3 : (fm->timers_per_worker[my_cpu_number], *poolindex, 0,
363 3 : fm->passive_timer);
364 : }
365 3 : return e;
366 : }
367 :
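 : /* Per-packet flow accounting: build a flow key from the L2/L3/L4 headers
 : * selected by the configured record flags, look up (or create) the flow
 : * entry, update its counters, and export it immediately in stateless mode
 : * or once its active timer has expired. */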
368 : static inline void
369 58 : add_to_flow_record_state (vlib_main_t *vm, vlib_node_runtime_t *node,
370 : flowprobe_main_t *fm, vlib_buffer_t *b,
371 : timestamp_nsec_t timestamp, u16 length,
372 : flowprobe_variant_t which,
373 : flowprobe_direction_t direction,
374 : flowprobe_trace_t *t)
375 : {
376 58 : if (fm->disabled)
377 0 : return;
378 :
379 58 : ASSERT (direction == FLOW_DIRECTION_RX || direction == FLOW_DIRECTION_TX);
380 :
381 58 : u32 my_cpu_number = vm->thread_index;
382 58 : u16 octets = 0;
383 :
384 58 : flowprobe_record_t flags = fm->context[which].flags;
385 58 : bool collect_ip4 = false, collect_ip6 = false;
386 58 : ASSERT (b);
387 58 : ethernet_header_t *eth = ethernet_buffer_get_header (b);
388 58 : u16 ethertype = clib_net_to_host_u16 (eth->type);
389 58 : u16 l2_hdr_sz = sizeof (ethernet_header_t);
390 : /* *INDENT-OFF* */
391 58 : flowprobe_key_t k = {};
392 : /* *INDENT-ON* */
393 58 : ip4_header_t *ip4 = 0;
394 58 : ip6_header_t *ip6 = 0;
395 58 : udp_header_t *udp = 0;
396 58 : tcp_header_t *tcp = 0;
397 58 : u8 tcp_flags = 0;
398 :
399 58 : if (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4)
400 : {
401 52 : collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
402 52 : collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
403 : }
404 :
405 58 : k.rx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
406 58 : k.tx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
407 :
408 58 : k.which = which;
409 58 : k.direction = direction;
410 :
411 58 : if (flags & FLOW_RECORD_L2)
412 : {
413 46 : clib_memcpy_fast (k.src_mac, eth->src_address, 6);
414 46 : clib_memcpy_fast (k.dst_mac, eth->dst_address, 6);
415 46 : k.ethertype = ethertype;
416 : }
417 58 : if (ethertype == ETHERNET_TYPE_VLAN)
418 : {
419 : /* VLAN tag */
420 0 : ethernet_vlan_header_tv_t *ethv =
421 : (ethernet_vlan_header_tv_t *) (&(eth->type));
422 : /* Q-in-Q possibility */
423 0 : while (clib_net_to_host_u16 (ethv->type) == ETHERNET_TYPE_VLAN)
424 : {
425 0 : ethv++;
426 0 : l2_hdr_sz += sizeof (ethernet_vlan_header_tv_t);
427 : }
428 0 : k.ethertype = ethertype = clib_net_to_host_u16 ((ethv)->type);
429 : }
430 58 : if (collect_ip6 && ethertype == ETHERNET_TYPE_IP6)
431 : {
432 6 : ip6 = (ip6_header_t *) (b->data + l2_hdr_sz);
433 6 : if (flags & FLOW_RECORD_L3)
434 : {
435 4 : k.src_address.as_u64[0] = ip6->src_address.as_u64[0];
436 4 : k.src_address.as_u64[1] = ip6->src_address.as_u64[1];
437 4 : k.dst_address.as_u64[0] = ip6->dst_address.as_u64[0];
438 4 : k.dst_address.as_u64[1] = ip6->dst_address.as_u64[1];
439 : }
440 6 : k.protocol = ip6->protocol;
441 6 : if (k.protocol == IP_PROTOCOL_UDP)
442 6 : udp = (udp_header_t *) (ip6 + 1);
443 0 : else if (k.protocol == IP_PROTOCOL_TCP)
444 0 : tcp = (tcp_header_t *) (ip6 + 1);
445 :
446 6 : octets = clib_net_to_host_u16 (ip6->payload_length)
447 : + sizeof (ip6_header_t);
448 : }
449 58 : if (collect_ip4 && ethertype == ETHERNET_TYPE_IP4)
450 : {
451 44 : ip4 = (ip4_header_t *) (b->data + l2_hdr_sz);
452 44 : if (flags & FLOW_RECORD_L3)
453 : {
454 40 : k.src_address.ip4.as_u32 = ip4->src_address.as_u32;
455 40 : k.dst_address.ip4.as_u32 = ip4->dst_address.as_u32;
456 : }
457 44 : k.protocol = ip4->protocol;
458 44 : if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_UDP)
459 39 : udp = (udp_header_t *) (ip4 + 1);
460 5 : else if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_TCP)
461 1 : tcp = (tcp_header_t *) (ip4 + 1);
462 :
463 44 : octets = clib_net_to_host_u16 (ip4->length);
464 : }
465 :
466 58 : if (udp)
467 : {
468 45 : k.src_port = udp->src_port;
469 45 : k.dst_port = udp->dst_port;
470 : }
471 13 : else if (tcp)
472 : {
473 1 : k.src_port = tcp->src_port;
474 1 : k.dst_port = tcp->dst_port;
475 1 : tcp_flags = tcp->flags;
476 : }
477 :
478 58 : if (t)
479 : {
480 38 : t->rx_sw_if_index = k.rx_sw_if_index;
481 38 : t->tx_sw_if_index = k.tx_sw_if_index;
482 38 : clib_memcpy_fast (t->src_mac, k.src_mac, 6);
483 38 : clib_memcpy_fast (t->dst_mac, k.dst_mac, 6);
484 38 : t->ethertype = k.ethertype;
485 38 : t->src_address.ip4.as_u32 = k.src_address.ip4.as_u32;
486 38 : t->dst_address.ip4.as_u32 = k.dst_address.ip4.as_u32;
487 38 : t->protocol = k.protocol;
488 38 : t->src_port = k.src_port;
489 38 : t->dst_port = k.dst_port;
490 38 : t->which = k.which;
491 : }
492 :
493 58 : flowprobe_entry_t *e = 0;
494 58 : f64 now = vlib_time_now (vm);
495 58 : if (fm->active_timer > 0)
496 : {
497 4 : u32 poolindex = ~0;
498 4 : bool collision = false;
499 :
500 4 : e = flowprobe_lookup (my_cpu_number, &k, &poolindex, &collision);
501 4 : if (collision)
502 : {
503 : /* Flush data and clean up entry for reuse. */
504 0 : if (e->packetcount)
505 0 : flowprobe_export_entry (vm, e);
506 0 : e->key = k;
507 0 : e->flow_start = timestamp;
508 0 : vlib_node_increment_counter (vm, node->node_index,
509 : FLOWPROBE_ERROR_COLLISION, 1);
510 : }
511 4 : if (!e) /* Create new entry */
512 : {
513 3 : e = flowprobe_create (my_cpu_number, &k, &poolindex);
514 3 : e->last_exported = now;
515 3 : e->flow_start = timestamp;
516 : }
517 : }
518 : else
519 : {
520 54 : e = &fm->stateless_entry[my_cpu_number];
521 54 : e->key = k;
522 : }
523 :
524 58 : if (e)
525 : {
526 : /* Updating entry */
527 58 : e->packetcount++;
528 58 : e->octetcount += octets;
529 58 : e->last_updated = now;
530 58 : e->flow_end = timestamp;
531 58 : e->prot.tcp.flags |= tcp_flags;
532 58 : if (fm->active_timer == 0
533 4 : || (now > e->last_exported + fm->active_timer))
534 54 : flowprobe_export_entry (vm, e);
535 : }
536 : }
537 :
538 : static u16
539 562 : flowprobe_get_headersize (void)
540 : {
541 562 : return sizeof (ip4_header_t) + sizeof (udp_header_t) +
542 : sizeof (ipfix_message_header_t) + sizeof (ipfix_set_header_t);
543 : }
544 :
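 : /* Fix up the IP, UDP and IPFIX message/set headers of the per-worker
 : * export buffer and hand it to ip4-lookup for delivery to the collector.
 : * A buffer that holds no data records is left untouched. */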
545 : static void
546 424 : flowprobe_export_send (vlib_main_t * vm, vlib_buffer_t * b0,
547 : flowprobe_variant_t which)
548 : {
549 424 : flowprobe_main_t *fm = &flowprobe_main;
550 424 : flow_report_main_t *frm = &flow_report_main;
551 424 : ipfix_exporter_t *exp = pool_elt_at_index (frm->exporters, 0);
552 : vlib_frame_t *f;
553 : ip4_ipfix_template_packet_t *tp;
554 : ipfix_set_header_t *s;
555 : ipfix_message_header_t *h;
556 : ip4_header_t *ip;
557 : udp_header_t *udp;
558 424 : flowprobe_record_t flags = fm->context[which].flags;
559 424 : u32 my_cpu_number = vm->thread_index;
560 :
561 : /* Fill in header */
562 : flow_report_stream_t *stream;
563 :
564 : /* Nothing to send */
565 424 : if (fm->context[which].next_record_offset_per_worker[my_cpu_number] <=
566 424 : flowprobe_get_headersize ())
567 391 : return;
568 :
569 33 : u32 i, index = vec_len (exp->streams);
570 33 : for (i = 0; i < index; i++)
571 33 : if (exp->streams[i].domain_id == 1)
572 : {
573 33 : index = i;
574 33 : break;
575 : }
576 33 : if (i == vec_len (exp->streams))
577 : {
578 0 : vec_validate (exp->streams, index);
579 0 : exp->streams[index].domain_id = 1;
580 : }
581 33 : stream = &exp->streams[index];
582 :
583 33 : tp = vlib_buffer_get_current (b0);
584 33 : ip = (ip4_header_t *) & tp->ip4;
585 33 : udp = (udp_header_t *) (ip + 1);
586 33 : h = (ipfix_message_header_t *) (udp + 1);
587 33 : s = (ipfix_set_header_t *) (h + 1);
588 :
589 33 : ip->ip_version_and_header_length = 0x45;
590 33 : ip->ttl = 254;
591 33 : ip->protocol = IP_PROTOCOL_UDP;
592 33 : ip->flags_and_fragment_offset = 0;
593 33 : ip->src_address.as_u32 = exp->src_address.ip.ip4.as_u32;
594 33 : ip->dst_address.as_u32 = exp->ipfix_collector.ip.ip4.as_u32;
595 33 : udp->src_port = clib_host_to_net_u16 (stream->src_port);
596 33 : udp->dst_port = clib_host_to_net_u16 (exp->collector_port);
597 33 : udp->checksum = 0;
598 :
599 : /* FIXUP: message header export_time */
600 33 : h->export_time = (u32)
601 66 : (((f64) frm->unix_time_0) +
602 33 : (vlib_time_now (frm->vlib_main) - frm->vlib_time_0));
603 33 : h->export_time = clib_host_to_net_u32 (h->export_time);
604 33 : h->domain_id = clib_host_to_net_u32 (stream->domain_id);
605 :
606 : /* FIXUP: message header sequence_number */
607 33 : h->sequence_number = stream->sequence_number++;
608 33 : h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
609 :
610 66 : s->set_id_length = ipfix_set_id_length (fm->template_reports[flags],
611 33 : b0->current_length -
612 : (sizeof (*ip) + sizeof (*udp) +
613 : sizeof (*h)));
614 33 : h->version_length = version_length (b0->current_length -
615 : (sizeof (*ip) + sizeof (*udp)));
616 :
617 33 : ip->length = clib_host_to_net_u16 (b0->current_length);
618 :
619 33 : ip->checksum = ip4_header_checksum (ip);
620 33 : udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
621 :
622 33 : if (exp->udp_checksum)
623 : {
624 : /* RFC 7011 section 10.3.2. */
625 0 : udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
626 0 : if (udp->checksum == 0)
627 0 : udp->checksum = 0xffff;
628 : }
629 :
630 33 : ASSERT (ip4_header_checksum_is_valid (ip));
631 :
632 : /* Find or allocate a frame */
633 33 : f = fm->context[which].frames_per_worker[my_cpu_number];
634 33 : if (PREDICT_FALSE (f == 0))
635 : {
636 : u32 *to_next;
637 33 : f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
638 33 : fm->context[which].frames_per_worker[my_cpu_number] = f;
639 33 : u32 bi0 = vlib_get_buffer_index (vm, b0);
640 :
641 : /* Enqueue the buffer */
642 33 : to_next = vlib_frame_vector_args (f);
643 33 : to_next[0] = bi0;
644 33 : f->n_vectors = 1;
645 : }
646 :
647 33 : vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
648 33 : vlib_node_increment_counter (vm, flowprobe_output_l2_node.index,
649 : FLOWPROBE_ERROR_EXPORTED_PACKETS, 1);
650 :
651 33 : fm->context[which].frames_per_worker[my_cpu_number] = 0;
652 33 : fm->context[which].buffers_per_worker[my_cpu_number] = 0;
653 33 : fm->context[which].next_record_offset_per_worker[my_cpu_number] =
654 33 : flowprobe_get_headersize ();
655 : }
656 :
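 : /* Return the per-worker export buffer for this variant, allocating and
 : * initializing a fresh one (with room reserved for the IPFIX headers)
 : * when none is in progress. */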
657 : static vlib_buffer_t *
658 477 : flowprobe_get_buffer (vlib_main_t * vm, flowprobe_variant_t which)
659 : {
660 477 : flowprobe_main_t *fm = &flowprobe_main;
661 477 : ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
662 : vlib_buffer_t *b0;
663 : u32 bi0;
664 477 : u32 my_cpu_number = vm->thread_index;
665 :
666 : /* Find or allocate a buffer */
667 477 : b0 = fm->context[which].buffers_per_worker[my_cpu_number];
668 :
669 : /* Need to allocate a buffer? */
670 477 : if (PREDICT_FALSE (b0 == 0))
671 : {
672 48 : if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
673 : {
674 0 : vlib_node_increment_counter (vm, flowprobe_output_l2_node.index,
675 : FLOWPROBE_ERROR_BUFFER, 1);
676 0 : return 0;
677 : }
678 :
679 : /* Initialize the buffer */
680 96 : b0 = fm->context[which].buffers_per_worker[my_cpu_number] =
681 48 : vlib_get_buffer (vm, bi0);
682 :
683 48 : b0->current_data = 0;
684 48 : b0->current_length = flowprobe_get_headersize ();
685 48 : b0->flags |=
686 : (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT);
687 48 : vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
688 48 : vnet_buffer (b0)->sw_if_index[VLIB_TX] = exp->fib_index;
689 48 : fm->context[which].next_record_offset_per_worker[my_cpu_number] =
690 48 : b0->current_length;
691 : }
692 :
693 477 : return b0;
694 : }
695 :
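 : /* Serialize one flow entry into the per-worker export buffer as an IPFIX
 : * data record (common, L2, L3 and L4 sections as configured), reset the
 : * per-export counters, and send the buffer once another record of this
 : * template would exceed the exporter's path MTU. */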
696 : static void
697 57 : flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
698 : {
699 57 : u32 my_cpu_number = vm->thread_index;
700 57 : flowprobe_main_t *fm = &flowprobe_main;
701 57 : ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
702 : vlib_buffer_t *b0;
703 57 : bool collect_ip4 = false, collect_ip6 = false;
704 57 : bool collect_l4 = false;
705 57 : flowprobe_variant_t which = e->key.which;
706 57 : flowprobe_record_t flags = fm->context[which].flags;
707 57 : u16 offset =
708 57 : fm->context[which].next_record_offset_per_worker[my_cpu_number];
709 :
710 57 : if (offset < flowprobe_get_headersize ())
711 0 : offset = flowprobe_get_headersize ();
712 :
713 57 : b0 = flowprobe_get_buffer (vm, which);
714 : /* No buffer available; skip exporting this entry */
715 57 : if (b0 == 0)
716 0 : return;
717 :
718 57 : if (flags & FLOW_RECORD_L3)
719 : {
720 45 : collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
721 45 : collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
722 : }
723 57 : if (flags & FLOW_RECORD_L4)
724 : {
725 45 : collect_l4 = (which != FLOW_VARIANT_L2);
726 : }
727 :
728 57 : offset += flowprobe_common_add (b0, e, offset);
729 :
730 57 : if (flags & FLOW_RECORD_L2)
731 45 : offset += flowprobe_l2_add (b0, e, offset);
732 57 : if (collect_ip6)
733 4 : offset += flowprobe_l3_ip6_add (b0, e, offset);
734 57 : if (collect_ip4)
735 39 : offset += flowprobe_l3_ip4_add (b0, e, offset);
736 57 : if (collect_l4)
737 43 : offset += flowprobe_l4_add (b0, e, offset);
738 :
739 : /* Reset per flow-export counters */
740 57 : e->packetcount = 0;
741 57 : e->octetcount = 0;
742 57 : e->last_exported = vlib_time_now (vm);
743 :
744 57 : b0->current_length = offset;
745 :
746 57 : fm->context[which].next_record_offset_per_worker[my_cpu_number] = offset;
747 : /* Time to flush the buffer? */
748 57 : if (offset + fm->template_size[flags] > exp->path_mtu)
749 4 : flowprobe_export_send (vm, b0, which);
750 : }
751 :
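 : /* Packet-processing loop shared by the six input/output graph nodes:
 : * packets are handled two at a time (with prefetch) and then one at a
 : * time, each is fed to add_to_flow_record_state () and then forwarded to
 : * the next feature on the arc. */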
752 : uword
753 31 : flowprobe_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
754 : vlib_frame_t *frame, flowprobe_variant_t which,
755 : flowprobe_direction_t direction)
756 : {
757 : u32 n_left_from, *from, *to_next;
758 : flowprobe_next_t next_index;
759 31 : flowprobe_main_t *fm = &flowprobe_main;
760 : timestamp_nsec_t timestamp;
761 :
762 31 : unix_time_now_nsec_fraction (&timestamp.sec, &timestamp.nsec);
763 :
764 31 : from = vlib_frame_vector_args (frame);
765 31 : n_left_from = frame->n_vectors;
766 31 : next_index = node->cached_next_index;
767 :
768 62 : while (n_left_from > 0)
769 : {
770 : u32 n_left_to_next;
771 :
772 31 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
773 :
774 41 : while (n_left_from >= 4 && n_left_to_next >= 2)
775 : {
776 10 : u32 next0 = FLOWPROBE_NEXT_DROP;
777 10 : u32 next1 = FLOWPROBE_NEXT_DROP;
778 : u16 len0, len1;
779 : u32 bi0, bi1;
780 : vlib_buffer_t *b0, *b1;
781 :
782 : /* Prefetch next iteration. */
783 : {
784 : vlib_buffer_t *p2, *p3;
785 :
786 10 : p2 = vlib_get_buffer (vm, from[2]);
787 10 : p3 = vlib_get_buffer (vm, from[3]);
788 :
789 10 : vlib_prefetch_buffer_header (p2, LOAD);
790 10 : vlib_prefetch_buffer_header (p3, LOAD);
791 :
792 10 : clib_prefetch_store (p2->data);
793 10 : clib_prefetch_store (p3->data);
794 : }
795 :
796 : /* speculatively enqueue b0 and b1 to the current next frame */
797 10 : to_next[0] = bi0 = from[0];
798 10 : to_next[1] = bi1 = from[1];
799 10 : from += 2;
800 10 : to_next += 2;
801 10 : n_left_from -= 2;
802 10 : n_left_to_next -= 2;
803 :
804 10 : b0 = vlib_get_buffer (vm, bi0);
805 10 : b1 = vlib_get_buffer (vm, bi1);
806 :
807 10 : vnet_feature_next (&next0, b0);
808 10 : vnet_feature_next (&next1, b1);
809 :
810 10 : len0 = vlib_buffer_length_in_chain (vm, b0);
811 10 : ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
812 10 : u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
813 :
814 10 : if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
815 10 : add_to_flow_record_state (
816 : vm, node, fm, b0, timestamp, len0,
817 10 : flowprobe_get_variant (which, fm->context[which].flags,
818 : ethertype0),
819 : direction, 0);
820 :
821 10 : len1 = vlib_buffer_length_in_chain (vm, b1);
822 10 : ethernet_header_t *eh1 = vlib_buffer_get_current (b1);
823 10 : u16 ethertype1 = clib_net_to_host_u16 (eh1->type);
824 :
825 10 : if (PREDICT_TRUE ((b1->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
826 10 : add_to_flow_record_state (
827 : vm, node, fm, b1, timestamp, len1,
828 10 : flowprobe_get_variant (which, fm->context[which].flags,
829 : ethertype1),
830 : direction, 0);
831 :
832 : /* verify speculative enqueues, maybe switch current next frame */
833 10 : vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
834 : to_next, n_left_to_next,
835 : bi0, bi1, next0, next1);
836 : }
837 :
838 69 : while (n_left_from > 0 && n_left_to_next > 0)
839 : {
840 : u32 bi0;
841 : vlib_buffer_t *b0;
842 38 : u32 next0 = FLOWPROBE_NEXT_DROP;
843 : u16 len0;
844 :
845 : /* speculatively enqueue b0 to the current next frame */
846 38 : bi0 = from[0];
847 38 : to_next[0] = bi0;
848 38 : from += 1;
849 38 : to_next += 1;
850 38 : n_left_from -= 1;
851 38 : n_left_to_next -= 1;
852 :
853 38 : b0 = vlib_get_buffer (vm, bi0);
854 :
855 38 : vnet_feature_next (&next0, b0);
856 :
857 38 : len0 = vlib_buffer_length_in_chain (vm, b0);
858 38 : ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
859 38 : u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
860 :
861 38 : if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
862 : {
863 38 : flowprobe_trace_t *t = 0;
864 38 : if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
865 : && (b0->flags & VLIB_BUFFER_IS_TRACED)))
866 38 : t = vlib_add_trace (vm, node, b0, sizeof (*t));
867 :
868 38 : add_to_flow_record_state (
869 : vm, node, fm, b0, timestamp, len0,
870 38 : flowprobe_get_variant (which, fm->context[which].flags,
871 : ethertype0),
872 : direction, t);
873 : }
874 :
875 : /* verify speculative enqueue, maybe switch current next frame */
876 38 : vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
877 : to_next, n_left_to_next,
878 : bi0, next0);
879 : }
880 :
881 31 : vlib_put_next_frame (vm, node, next_index, n_left_to_next);
882 : }
883 31 : return frame->n_vectors;
884 : }
885 :
886 : static uword
887 3 : flowprobe_input_ip4_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
888 : vlib_frame_t *frame)
889 : {
890 3 : return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP4,
891 : FLOW_DIRECTION_RX);
892 : }
893 :
894 : static uword
895 3 : flowprobe_input_ip6_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
896 : vlib_frame_t *frame)
897 : {
898 3 : return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP6,
899 : FLOW_DIRECTION_RX);
900 : }
901 :
902 : static uword
903 8 : flowprobe_input_l2_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
904 : vlib_frame_t *frame)
905 : {
906 8 : return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_L2,
907 : FLOW_DIRECTION_RX);
908 : }
909 :
910 : static uword
911 4 : flowprobe_output_ip4_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
912 : vlib_frame_t *frame)
913 : {
914 4 : return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP4,
915 : FLOW_DIRECTION_TX);
916 : }
917 :
918 : static uword
919 3 : flowprobe_output_ip6_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
920 : vlib_frame_t *frame)
921 : {
922 3 : return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP6,
923 : FLOW_DIRECTION_TX);
924 : }
925 :
926 : static uword
927 10 : flowprobe_output_l2_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
928 : vlib_frame_t *frame)
929 : {
930 10 : return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_L2,
931 : FLOW_DIRECTION_TX);
932 : }
933 :
934 : static inline void
935 420 : flush_record (flowprobe_variant_t which)
936 : {
937 420 : vlib_main_t *vm = vlib_get_main ();
938 420 : vlib_buffer_t *b = flowprobe_get_buffer (vm, which);
939 420 : if (b)
940 420 : flowprobe_export_send (vm, b, which);
941 420 : }
942 :
943 : void
944 28 : flowprobe_flush_callback_ip4 (void)
945 : {
946 28 : flush_record (FLOW_VARIANT_IP4);
947 28 : }
948 :
949 : void
950 23 : flowprobe_flush_callback_ip6 (void)
951 : {
952 23 : flush_record (FLOW_VARIANT_IP6);
953 23 : }
954 :
955 : void
956 123 : flowprobe_flush_callback_l2 (void)
957 : {
958 123 : flush_record (FLOW_VARIANT_L2);
959 123 : flush_record (FLOW_VARIANT_L2_IP4);
960 123 : flush_record (FLOW_VARIANT_L2_IP6);
961 123 : }
962 :
963 : void
964 3 : flowprobe_delete_by_index (u32 my_cpu_number, u32 poolindex)
965 : {
966 3 : flowprobe_main_t *fm = &flowprobe_main;
967 : flowprobe_entry_t *e;
968 : u32 h;
969 :
970 3 : e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], poolindex);
971 :
972 : /* Get my index */
973 3 : h = flowprobe_hash (&e->key);
974 :
975 : /* Reset hash */
976 3 : fm->hash_per_worker[my_cpu_number][h] = ~0;
977 :
978 3 : pool_put_index (fm->pool_per_worker[my_cpu_number], poolindex);
979 3 : }
980 :
981 :
982 : /* Per worker process processing the active/passive expired entries */
983 : static uword
984 146453 : flowprobe_walker_process (vlib_main_t * vm,
985 : vlib_node_runtime_t * rt, vlib_frame_t * f)
986 : {
987 146453 : flowprobe_main_t *fm = &flowprobe_main;
988 : flowprobe_entry_t *e;
989 146453 : ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
990 :
991 : /*
992 : * $$$$ Remove this check from here and track FRM status and disable
993 : * this process if required.
994 : */
995 253245 : if (ip_address_is_zero (&exp->ipfix_collector) ||
996 106792 : ip_address_is_zero (&exp->src_address))
997 : {
998 39661 : fm->disabled = true;
999 39661 : return 0;
1000 : }
1001 106792 : fm->disabled = false;
1002 :
1003 106792 : u32 cpu_index = os_get_thread_index ();
1004 106792 : u32 *to_be_removed = 0, *i;
1005 :
1006 : /*
1007 : * Tick the timer when required and process the vector of expired
1008 : * timers
1009 : */
1010 106792 : f64 start_time = vlib_time_now (vm);
1011 106792 : u32 count = 0;
1012 :
1013 106792 : tw_timer_expire_timers_2t_1w_2048sl (fm->timers_per_worker[cpu_index],
1014 : start_time);
1015 :
1016 106795 : vec_foreach (i, fm->expired_passive_per_worker[cpu_index])
1017 : {
1018 3 : u32 exported = 0;
1019 3 : f64 now = vlib_time_now (vm);
1020 3 : if (now > start_time + 100e-6
1021 3 : || exported > FLOW_MAXIMUM_EXPORT_ENTRIES - 1)
1022 : break;
1023 :
1024 3 : if (pool_is_free_index (fm->pool_per_worker[cpu_index], *i))
1025 : {
1026 0 : clib_warning ("Element is %d is freed already\n", *i);
1027 0 : continue;
1028 : }
1029 : else
1030 3 : e = pool_elt_at_index (fm->pool_per_worker[cpu_index], *i);
1031 :
1032 : /* Check the last-updated timestamp. If the entry has been idle longer
1033 : * than the passive timeout, remove it; otherwise restart the timer with
1034 : * the time remaining. The timer is allowed to fire up to 10% early.
1035 : */
1036 3 : if ((now - e->last_updated) < (u64) (fm->passive_timer * 0.9))
1037 : {
1038 0 : u64 delta = fm->passive_timer - (now - e->last_updated);
1039 0 : e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
1040 0 : (fm->timers_per_worker[cpu_index], *i, 0, delta);
1041 : }
1042 : else /* Nuke entry */
1043 : {
1044 3 : vec_add1 (to_be_removed, *i);
1045 : }
1046 : /* If there is anything to report, send it to the exporter */
1047 3 : if (e->packetcount && now > e->last_exported + fm->active_timer)
1048 : {
1049 3 : exported++;
1050 3 : flowprobe_export_entry (vm, e);
1051 : }
1052 3 : count++;
1053 : }
1054 106792 : if (count)
1055 3 : vec_delete (fm->expired_passive_per_worker[cpu_index], count, 0);
1056 :
1057 106795 : vec_foreach (i, to_be_removed) flowprobe_delete_by_index (cpu_index, *i);
1058 106792 : vec_free (to_be_removed);
1059 :
1060 106792 : return 0;
1061 : }
1062 :
1063 : /* *INDENT-OFF* */
1064 144044 : VLIB_REGISTER_NODE (flowprobe_input_ip4_node) = {
1065 : .function = flowprobe_input_ip4_node_fn,
1066 : .name = "flowprobe-input-ip4",
1067 : .vector_size = sizeof (u32),
1068 : .format_trace = format_flowprobe_trace,
1069 : .type = VLIB_NODE_TYPE_INTERNAL,
1070 : .n_errors = ARRAY_LEN (flowprobe_error_strings),
1071 : .error_strings = flowprobe_error_strings,
1072 : .n_next_nodes = FLOWPROBE_N_NEXT,
1073 : .next_nodes = FLOWPROBE_NEXT_NODES,
1074 : };
1075 144044 : VLIB_REGISTER_NODE (flowprobe_input_ip6_node) = {
1076 : .function = flowprobe_input_ip6_node_fn,
1077 : .name = "flowprobe-input-ip6",
1078 : .vector_size = sizeof (u32),
1079 : .format_trace = format_flowprobe_trace,
1080 : .type = VLIB_NODE_TYPE_INTERNAL,
1081 : .n_errors = ARRAY_LEN (flowprobe_error_strings),
1082 : .error_strings = flowprobe_error_strings,
1083 : .n_next_nodes = FLOWPROBE_N_NEXT,
1084 : .next_nodes = FLOWPROBE_NEXT_NODES,
1085 : };
1086 144044 : VLIB_REGISTER_NODE (flowprobe_input_l2_node) = {
1087 : .function = flowprobe_input_l2_node_fn,
1088 : .name = "flowprobe-input-l2",
1089 : .vector_size = sizeof (u32),
1090 : .format_trace = format_flowprobe_trace,
1091 : .type = VLIB_NODE_TYPE_INTERNAL,
1092 : .n_errors = ARRAY_LEN (flowprobe_error_strings),
1093 : .error_strings = flowprobe_error_strings,
1094 : .n_next_nodes = FLOWPROBE_N_NEXT,
1095 : .next_nodes = FLOWPROBE_NEXT_NODES,
1096 : };
1097 144044 : VLIB_REGISTER_NODE (flowprobe_output_ip4_node) = {
1098 : .function = flowprobe_output_ip4_node_fn,
1099 : .name = "flowprobe-output-ip4",
1100 : .vector_size = sizeof (u32),
1101 : .format_trace = format_flowprobe_trace,
1102 : .type = VLIB_NODE_TYPE_INTERNAL,
1103 : .n_errors = ARRAY_LEN (flowprobe_error_strings),
1104 : .error_strings = flowprobe_error_strings,
1105 : .n_next_nodes = FLOWPROBE_N_NEXT,
1106 : .next_nodes = FLOWPROBE_NEXT_NODES,
1107 : };
1108 144044 : VLIB_REGISTER_NODE (flowprobe_output_ip6_node) = {
1109 : .function = flowprobe_output_ip6_node_fn,
1110 : .name = "flowprobe-output-ip6",
1111 : .vector_size = sizeof (u32),
1112 : .format_trace = format_flowprobe_trace,
1113 : .type = VLIB_NODE_TYPE_INTERNAL,
1114 : .n_errors = ARRAY_LEN (flowprobe_error_strings),
1115 : .error_strings = flowprobe_error_strings,
1116 : .n_next_nodes = FLOWPROBE_N_NEXT,
1117 : .next_nodes = FLOWPROBE_NEXT_NODES,
1118 : };
1119 144044 : VLIB_REGISTER_NODE (flowprobe_output_l2_node) = {
1120 : .function = flowprobe_output_l2_node_fn,
1121 : .name = "flowprobe-output-l2",
1122 : .vector_size = sizeof (u32),
1123 : .format_trace = format_flowprobe_trace,
1124 : .type = VLIB_NODE_TYPE_INTERNAL,
1125 : .n_errors = ARRAY_LEN (flowprobe_error_strings),
1126 : .error_strings = flowprobe_error_strings,
1127 : .n_next_nodes = FLOWPROBE_N_NEXT,
1128 : .next_nodes = FLOWPROBE_NEXT_NODES,
1129 : };
1130 144044 : VLIB_REGISTER_NODE (flowprobe_walker_node) = {
1131 : .function = flowprobe_walker_process,
1132 : .name = "flowprobe-walker",
1133 : .type = VLIB_NODE_TYPE_INPUT,
1134 : .state = VLIB_NODE_STATE_INTERRUPT,
1135 : };
1136 : /* *INDENT-ON* */
1137 :
1138 : /*
1139 : * fd.io coding-style-patch-verification: ON
1140 : *
1141 : * Local Variables:
1142 : * eval: (c-set-style "gnu")
1143 : * End:
1144 : */
|