Line data Source code
1 : /*
2 : * Copyright (c) 2016 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 : #ifndef __included_ioam_export_h__
16 : #define __included_ioam_export_h__
17 :
18 : #include <vnet/vnet.h>
19 : #include <vnet/ip/ip.h>
20 : #include <vnet/ip/ip_packet.h>
21 : #include <vnet/ip/ip4_packet.h>
22 : #include <vnet/ip/ip6_packet.h>
23 : #include <vnet/ip/ip6_hop_by_hop.h>
24 : #include <vnet/udp/udp_local.h>
25 : #include <vnet/udp/udp_packet.h>
26 : #include <vnet/ipfix-export/ipfix_packet.h>
27 :
28 : #include <vppinfra/pool.h>
29 : #include <vppinfra/hash.h>
30 : #include <vppinfra/error.h>
31 : #include <vppinfra/elog.h>
32 : #include <vppinfra/lock.h>
33 :
34 : #include <vlib/threads.h>
35 :
/*
 * Per-thread staging buffer: accumulates iOAM export records in a single
 * vlib buffer until it is full (DEFAULT_EXPORT_RECORDS) or times out
 * (EXPORT_TIMEOUT) and is handed to the IPFIX collector path.
 */
typedef struct ioam_export_buffer
{
  /** Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* Allocated buffer */
  u32 buffer_index;
  /* Last time this buffer was written; stores vlib_time_now() truncated
   * to whole seconds (f64 -> u64), so timeout checks have ~1s granularity */
  u64 touched_at;
  /* Number of export records currently appended to the buffer */
  u8 records_in_this_buffer;
} ioam_export_buffer_t;
45 :
46 :
/* Main state for the iOAM IPFIX export feature. */
typedef struct
{
  /* API message ID base */
  u16 msg_id_base;
  /* IPFIX set id placed in the data-set header (e.g. IPFIX_IOAM_EXPORT_ID) */
  u16 set_id;

  /* TODO: to support multiple collectors all this has to be grouped and create a vector here */
  /* Prebuilt IP4+UDP+IPFIX encapsulation, built by ioam_export_header_create() */
  u8 *record_header;
  /* IPFIX sequence number (host order); bumped once per exported packet */
  u32 sequence_number;
  /* IPFIX observation domain id (host order) */
  u32 domain_id;

  /* ipfix collector, our ip address */
  ip4_address_t ipfix_collector;
  ip4_address_t src_address;

  /* Pool of ioam_export_buffer_t */
  ioam_export_buffer_t *buffer_pool;
  /* Vector of per thread ioam_export_buffer_t to buffer pool index */
  u32 *buffer_per_thread;
  /* Lock per thread to swap buffers between worker and timer process */
  clib_spinlock_t *lockp;

  /* time scale transform */
  u32 unix_time_0;
  f64 vlib_time_0;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  ethernet_main_t *ethernet_main;
  /* Node that receives filled export buffers (default: "ip4-lookup") */
  u32 next_node_index;

  uword my_hbh_slot;
  /* Node index of the export process; set in ioam_export_process_common() */
  u32 export_process_node_index;
} ioam_export_main_t;
82 :
83 :
/* Space reserved per export record: three cache lines (192B on 64B lines) */
#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
/*
 * Number of records in a buffer
 * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
 */
#define DEFAULT_EXPORT_RECORDS 7
90 :
91 : inline static void
92 0 : ioam_export_set_next_node (ioam_export_main_t * em, u8 * next_node_name)
93 : {
94 : vlib_node_t *next_node;
95 :
96 0 : next_node = vlib_get_node_by_name (em->vlib_main, next_node_name);
97 0 : em->next_node_index = next_node->index;
98 0 : }
99 :
100 : inline static void
101 1725 : ioam_export_reset_next_node (ioam_export_main_t * em)
102 : {
103 : vlib_node_t *next_node;
104 :
105 1725 : next_node = vlib_get_node_by_name (em->vlib_main, (u8 *) "ip4-lookup");
106 1725 : em->next_node_index = next_node->index;
107 1725 : }
108 :
109 : always_inline ioam_export_buffer_t *
110 0 : ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
111 : {
112 :
113 0 : if (vec_len (em->buffer_per_thread) > thread_id)
114 0 : return (pool_elt_at_index
115 : (em->buffer_pool, em->buffer_per_thread[thread_id]));
116 0 : return (0);
117 : }
118 :
119 : inline static int
120 0 : ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
121 : {
122 0 : clib_memcpy_fast (b0->data, em->record_header, vec_len (em->record_header));
123 0 : b0->current_data = 0;
124 0 : b0->current_length = vec_len (em->record_header);
125 0 : b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
126 0 : return (1);
127 : }
128 :
129 : inline static int
130 0 : ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
131 : ioam_export_buffer_t * eb)
132 : {
133 0 : vlib_buffer_t *b = 0;
134 :
135 0 : if (!eb)
136 0 : return (-1);
137 : /* TODO: Perhaps buffer init from template here */
138 0 : if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
139 0 : return (-2);
140 0 : eb->records_in_this_buffer = 0;
141 0 : eb->touched_at = vlib_time_now (vm);
142 0 : b = vlib_get_buffer (vm, eb->buffer_index);
143 0 : (void) ioam_export_buffer_add_header (em, b);
144 0 : vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
145 0 : vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
146 0 : return (1);
147 : }
148 :
149 : inline static void
150 0 : ioam_export_thread_buffer_free (ioam_export_main_t * em)
151 : {
152 0 : vlib_main_t *vm = em->vlib_main;
153 0 : ioam_export_buffer_t *eb = 0;
154 : int i;
155 0 : for (i = 0; i < vec_len (em->buffer_per_thread); i++)
156 : {
157 0 : eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
158 0 : if (eb)
159 0 : vlib_buffer_free (vm, &(eb->buffer_index), 1);
160 : }
161 0 : for (i = 0; i < vec_len (em->lockp); i++)
162 0 : clib_mem_free ((void *) em->lockp[i]);
163 0 : vec_free (em->buffer_per_thread);
164 0 : pool_free (em->buffer_pool);
165 0 : vec_free (em->lockp);
166 0 : em->buffer_per_thread = 0;
167 0 : em->buffer_pool = 0;
168 0 : em->lockp = 0;
169 0 : }
170 :
171 : inline static int
172 0 : ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
173 : {
174 0 : int no_of_threads = vec_len (vlib_worker_threads);
175 : int i;
176 0 : ioam_export_buffer_t *eb = 0;
177 :
178 0 : pool_alloc_aligned (em->buffer_pool,
179 : no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
180 0 : vec_validate_aligned (em->buffer_per_thread,
181 : no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
182 0 : vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
183 :
184 0 : if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
185 : {
186 0 : return (-1);
187 : }
188 0 : for (i = 0; i < no_of_threads; i++)
189 : {
190 0 : eb = 0;
191 0 : pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
192 0 : clib_memset (eb, 0, sizeof (*eb));
193 0 : em->buffer_per_thread[i] = eb - em->buffer_pool;
194 0 : if (ioam_export_init_buffer (em, vm, eb) != 1)
195 : {
196 0 : ioam_export_thread_buffer_free (em);
197 0 : return (-2);
198 : }
199 0 : clib_spinlock_init (&em->lockp[i]);
200 : }
201 0 : return (1);
202 : }
203 :
/* IPFIX set ids used for iOAM export data sets */
#define IPFIX_IOAM_EXPORT_ID 272
#define IPFIX_VXLAN_IOAM_EXPORT_ID 273

/* Used to build the rewrite */
/* data set packet */
typedef struct
{
  ipfix_message_header_t h;
  ipfix_set_header_t s;
} ipfix_data_packet_t;

/* Complete on-the-wire encapsulation: IPv4 + UDP + IPFIX headers */
typedef struct
{
  ip4_header_t ip4;
  udp_header_t udp;
  ipfix_data_packet_t ipfix;
} ip4_ipfix_data_packet_t;
221 :
222 :
223 : inline static void
224 0 : ioam_export_header_cleanup (ioam_export_main_t * em,
225 : ip4_address_t * collector_address,
226 : ip4_address_t * src_address)
227 : {
228 0 : vec_free (em->record_header);
229 0 : em->record_header = 0;
230 0 : }
231 :
/*
 * Build the static IPv4/UDP/IPFIX encapsulation used for every export
 * packet and store it in em->record_header.  All length and checksum
 * fields are precomputed assuming a full payload of
 * DEFAULT_EXPORT_RECORDS records; ioam_export_send_buffer() patches them
 * ("FIXUP") when a buffer is flushed with fewer records.
 * Always returns 1.
 */
inline static int
ioam_export_header_create (ioam_export_main_t * em,
			   ip4_address_t * collector_address,
			   ip4_address_t * src_address)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  u8 *rewrite = 0;
  ip4_ipfix_data_packet_t *tp;


  /* allocate rewrite space (zero-filled, so udp->checksum stays 0 =
     "no checksum", which is legal for UDP over IPv4) */
  vec_validate_aligned (rewrite,
			sizeof (ip4_ipfix_data_packet_t) - 1,
			CLIB_CACHE_LINE_BYTES);

  /* Carve the flat rewrite into its stacked headers. */
  tp = (ip4_ipfix_data_packet_t *) rewrite;
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  ip->ip_version_and_header_length = 0x45;
  ip->ttl = 254;
  ip->protocol = IP_PROTOCOL_UDP;
  ip->src_address.as_u32 = src_address->as_u32;
  ip->dst_address.as_u32 = collector_address->as_u32;
  udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  /* FIXUP: UDP length */
  udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
				      (DEFAULT_EXPORT_RECORDS *
				       DEFAULT_EXPORT_SIZE) - sizeof (*ip));

  /* FIXUP: message header export_time */
  /* FIXUP: message header sequence_number */
  h->domain_id = clib_host_to_net_u32 (em->domain_id);

  /*FIXUP: Setid length in octets if records exported are not default */
  s->set_id_length = ipfix_set_id_length (em->set_id,
					  (sizeof (*s) +
					   (DEFAULT_EXPORT_RECORDS *
					    DEFAULT_EXPORT_SIZE)));

  /* FIXUP: h version and length length in octets if records exported are not default */
  h->version_length = version_length (sizeof (*h) +
				      (sizeof (*s) +
				       (DEFAULT_EXPORT_RECORDS *
					DEFAULT_EXPORT_SIZE)));

  /* FIXUP: ip length if records exported are not default */
  /* FIXUP: ip checksum if records exported are not default */
  ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
				     (DEFAULT_EXPORT_RECORDS *
				      DEFAULT_EXPORT_SIZE));
  /* Checksum must be computed last, after all IP fields are final. */
  ip->checksum = ip4_header_checksum (ip);
  vec_set_len (rewrite, sizeof (ip4_ipfix_data_packet_t));
  em->record_header = rewrite;
  return (1);
}
294 :
/*
 * Finalize a filled (or timed-out) export buffer and hand it to
 * em->next_node_index in a single-packet frame.  Patches export_time and
 * sequence_number, and — when fewer than DEFAULT_EXPORT_RECORDS records
 * are present — the IPFIX/UDP/IP lengths, updating the IP checksum
 * incrementally.  Ownership of the vlib buffer passes to the next node.
 * Always returns 1.
 */
inline static int
ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
			 ioam_export_buffer_t * eb)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  ip4_ipfix_data_packet_t *tp;
  vlib_buffer_t *b0;
  u16 new_l0, old_l0;
  ip_csum_t sum0;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  b0 = vlib_get_buffer (vm, eb->buffer_index);
  tp = vlib_buffer_get_current (b0);
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  /* FIXUP: message header export_time */
  /* unix_time_0/vlib_time_0 translate vlib time to wall-clock seconds */
  h->export_time = clib_host_to_net_u32 ((u32)
					 (((f64) em->unix_time_0) +
					  (vlib_time_now (em->vlib_main) -
					   em->vlib_time_0)));

  /* FIXUP: message header sequence_number */
  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);

  /* FIXUP: lengths if different from default */
  if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
    {
      s->set_id_length = ipfix_set_id_length (em->set_id /* set_id */ ,
					      b0->current_length -
					      (sizeof (*ip) + sizeof (*udp) +
					       sizeof (*h)));
      h->version_length =
	version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
      /* Incrementally update the IP checksum for the changed length field
         (RFC 1624 style) instead of recomputing it from scratch. */
      sum0 = ip->checksum;
      old_l0 = ip->length;
      new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
			     length /* changed member */ );
      ip->checksum = ip_csum_fold (sum0);
      ip->length = new_l0;
      udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
    }

  /* Enqueue pkts to ip4-lookup */

  nf = vlib_get_frame_to_node (vm, em->next_node_index);
  nf->n_vectors = 0;
  to_next = vlib_frame_vector_args (nf);
  nf->n_vectors = 1;
  to_next[0] = eb->buffer_index;
  vlib_put_frame_to_node (vm, em->next_node_index, nf);
  return (1);

}
356 :
/* Flush a buffer that has records but has been idle this long (seconds) */
#define EXPORT_TIMEOUT (20.0)
/* Polling period of the export process while running (seconds) */
#define THREAD_PERIOD (30.0)
/*
 * Export process body (shared by the per-feature process nodes).
 * Blocks until a kickoff event (1), then every THREAD_PERIOD scans the
 * per-thread buffers: any buffer with records untouched for EXPORT_TIMEOUT
 * is swapped for a fresh one (under the per-thread spinlock) and sent to
 * the collector.  Event 2 parks the process again; event ~0 is the timer.
 * NOTE(review): the switch has no default: arm, so unexpected events fall
 * through silently with the current timeout.
 */
inline static uword
ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
			    vlib_node_runtime_t * rt, vlib_frame_t * f,
			    u32 index)
{
  f64 now;
  f64 timeout = 30.0;
  uword event_type;
  uword *event_data = 0;
  int i;
  ioam_export_buffer_t *eb = 0, *new_eb = 0;
  /* Parallel vectors: replacement buffer, buffer to flush, owning thread */
  u32 *vec_buffer_indices = 0;
  u32 *vec_buffer_to_be_sent = 0;
  u32 *thread_index = 0;
  u32 new_pool_index = 0;

  em->export_process_node_index = index;
  /* Wait for Godot... */
  vlib_process_wait_for_event_or_clock (vm, 1e9);
  event_type = vlib_process_get_events (vm, &event_data);
  if (event_type != 1)
    clib_warning ("bogus kickoff event received, %d", event_type);
  vec_reset_length (event_data);

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, &event_data);
      switch (event_type)
	{
	case 2:		/* Stop and Wait for kickoff again */
	  timeout = 1e9;
	  break;
	case 1:		/* kickoff : Check for unsent buffers */
	  timeout = THREAD_PERIOD;
	  break;
	case ~0:		/* timeout */
	  break;
	}
      vec_reset_length (event_data);
      now = vlib_time_now (vm);
      /*
       * Create buffers for threads that are not active enough
       * to send out the export records
       */
      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
	{
	  /* If the worker thread is processing export records ignore further checks */
	  if (CLIB_SPINLOCK_IS_LOCKED (&em->lockp[i]))
	    continue;
	  eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
	  if (eb->records_in_this_buffer > 0
	      && now > (eb->touched_at + EXPORT_TIMEOUT))
	    {
	      /* Pre-allocate the replacement before taking the lock. */
	      pool_get_aligned (em->buffer_pool, new_eb,
				CLIB_CACHE_LINE_BYTES);
	      clib_memset (new_eb, 0, sizeof (*new_eb));
	      if (ioam_export_init_buffer (em, vm, new_eb) == 1)
		{
		  new_pool_index = new_eb - em->buffer_pool;
		  vec_add (vec_buffer_indices, &new_pool_index, 1);
		  vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
			   1);
		  vec_add (thread_index, &i, 1);
		}
	      else
		{
		  pool_put (em->buffer_pool, new_eb);
		  /*Give up */
		  goto CLEANUP;
		}
	    }
	}
      if (vec_len (thread_index) != 0)
	{
	  /*
	   * Now swap the buffers out
	   * (vec_pop hands replacements out in reverse order; harmless
	   * since every replacement is an identical fresh buffer)
	   */
	  for (i = 0; i < vec_len (thread_index); i++)
	    {
	      clib_spinlock_lock (&em->lockp[thread_index[i]]);
	      em->buffer_per_thread[thread_index[i]] =
		vec_pop (vec_buffer_indices);
	      clib_spinlock_unlock (&em->lockp[thread_index[i]]);
	    }

	  /* Send the buffers */
	  for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
	    {
	      eb =
		pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
	      ioam_export_send_buffer (em, vm, eb);
	      pool_put (em->buffer_pool, eb);
	    }
	}

    CLEANUP:
      /* Free any leftover/unused buffers and everything that was allocated */
      for (i = 0; i < vec_len (vec_buffer_indices); i++)
	{
	  new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
	  vlib_buffer_free (vm, &new_eb->buffer_index, 1);
	  pool_put (em->buffer_pool, new_eb);
	}
      vec_free (vec_buffer_indices);
      vec_free (vec_buffer_to_be_sent);
      vec_free (thread_index);
    }
  return 0;			/* not so much */
}
469 :
/*
 * Shared fast-path body for iOAM export graph nodes.  Under the caller
 * thread's spinlock it copies up to DEFAULT_EXPORT_SIZE bytes of each
 * packet's header (HTYPE, length field L, trace field V) into the thread's
 * export buffer, flushing/refreshing the buffer each time it reaches
 * DEFAULT_EXPORT_RECORDS, then forwards every packet unchanged to NEXT.
 * Expects export_next_t, export_trace_t, export_node, copy3cachelines and
 * EXPORT_ERROR_RECORDED to be defined at the expansion site.
 * NOTE(review): the dual-packet loop references `node` literally (trace
 * checks/adds) while the single-packet loop uses the N parameter — this
 * only works when callers pass a runtime node actually named `node`;
 * confirm at each expansion site.
 */
#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC) \
do { \
  u32 n_left_from, *from, *to_next; \
  export_next_t next_index; \
  u32 pkts_recorded = 0; \
  ioam_export_buffer_t *my_buf = 0; \
  vlib_buffer_t *eb0 = 0; \
  u32 ebi0 = 0; \
  from = vlib_frame_vector_args (F); \
  n_left_from = (F)->n_vectors; \
  next_index = (N)->cached_next_index; \
  clib_spinlock_lock (&(EM)->lockp[(VM)->thread_index]); \
  my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index); \
  my_buf->touched_at = vlib_time_now (VM); \
  while (n_left_from > 0) \
    { \
      u32 n_left_to_next; \
      vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next); \
      while (n_left_from >= 4 && n_left_to_next >= 2) \
	{ \
	  u32 next0 = NEXT; \
	  u32 next1 = NEXT; \
	  u32 bi0, bi1; \
	  HTYPE *ip0, *ip1; \
	  vlib_buffer_t *p0, *p1; \
	  u32 ip_len0, ip_len1; \
	  { \
	    vlib_buffer_t *p2, *p3; \
	    p2 = vlib_get_buffer (VM, from[2]); \
	    p3 = vlib_get_buffer (VM, from[3]); \
	    vlib_prefetch_buffer_header (p2, LOAD); \
	    vlib_prefetch_buffer_header (p3, LOAD); \
	    CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \
	    CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \
	  } \
	  to_next[0] = bi0 = from[0]; \
	  to_next[1] = bi1 = from[1]; \
	  from += 2; \
	  to_next += 2; \
	  n_left_from -= 2; \
	  n_left_to_next -= 2; \
	  p0 = vlib_get_buffer (VM, bi0); \
	  p1 = vlib_get_buffer (VM, bi1); \
	  ip0 = vlib_buffer_get_current (p0); \
	  ip1 = vlib_buffer_get_current (p1); \
	  ip_len0 = \
	    clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \
	  ip_len1 = \
	    clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE); \
	  ebi0 = my_buf->buffer_index; \
	  eb0 = vlib_get_buffer (VM, ebi0); \
	  if (PREDICT_FALSE (eb0 == 0)) \
	    goto NO_BUFFER1; \
	  ip_len0 = \
	    ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \
	  ip_len1 = \
	    ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1; \
	  copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \
	  FIXUP_FUNC(eb0, p0); \
	  eb0->current_length += DEFAULT_EXPORT_SIZE; \
	  my_buf->records_in_this_buffer++; \
	  if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	    { \
	      ioam_export_send_buffer (EM, VM, my_buf); \
	      ioam_export_init_buffer (EM, VM, my_buf); \
	    } \
	  ebi0 = my_buf->buffer_index; \
	  eb0 = vlib_get_buffer (VM, ebi0); \
	  if (PREDICT_FALSE (eb0 == 0)) \
	    goto NO_BUFFER1; \
	  copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1); \
	  FIXUP_FUNC(eb0, p1); \
	  eb0->current_length += DEFAULT_EXPORT_SIZE; \
	  my_buf->records_in_this_buffer++; \
	  if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	    { \
	      ioam_export_send_buffer (EM, VM, my_buf); \
	      ioam_export_init_buffer (EM, VM, my_buf); \
	    } \
	  pkts_recorded += 2; \
	  if (PREDICT_FALSE (((node)->flags & VLIB_NODE_FLAG_TRACE))) \
	    { \
	      if (p0->flags & VLIB_BUFFER_IS_TRACED) \
		{ \
		  export_trace_t *t = \
		    vlib_add_trace (VM, node, p0, sizeof (*t)); \
		  t->flow_label = \
		    clib_net_to_host_u32 (ip0->V); \
		  t->next_index = next0; \
		} \
	      if (p1->flags & VLIB_BUFFER_IS_TRACED) \
		{ \
		  export_trace_t *t = \
		    vlib_add_trace (VM, N, p1, sizeof (*t)); \
		  t->flow_label = \
		    clib_net_to_host_u32 (ip1->V); \
		  t->next_index = next1; \
		} \
	    } \
	NO_BUFFER1: \
	  vlib_validate_buffer_enqueue_x2 (VM, N, next_index, \
					   to_next, n_left_to_next, \
					   bi0, bi1, next0, next1); \
	} \
      while (n_left_from > 0 && n_left_to_next > 0) \
	{ \
	  u32 bi0; \
	  vlib_buffer_t *p0; \
	  u32 next0 = NEXT; \
	  HTYPE *ip0; \
	  u32 ip_len0; \
	  bi0 = from[0]; \
	  to_next[0] = bi0; \
	  from += 1; \
	  to_next += 1; \
	  n_left_from -= 1; \
	  n_left_to_next -= 1; \
	  p0 = vlib_get_buffer (VM, bi0); \
	  ip0 = vlib_buffer_get_current (p0); \
	  ip_len0 = \
	    clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \
	  ebi0 = my_buf->buffer_index; \
	  eb0 = vlib_get_buffer (VM, ebi0); \
	  if (PREDICT_FALSE (eb0 == 0)) \
	    goto NO_BUFFER; \
	  ip_len0 = \
	    ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \
	  copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \
	  FIXUP_FUNC(eb0, p0); \
	  eb0->current_length += DEFAULT_EXPORT_SIZE; \
	  my_buf->records_in_this_buffer++; \
	  if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	    { \
	      ioam_export_send_buffer (EM, VM, my_buf); \
	      ioam_export_init_buffer (EM, VM, my_buf); \
	    } \
	  if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE) \
			     && (p0->flags & VLIB_BUFFER_IS_TRACED))) \
	    { \
	      export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t)); \
	      t->flow_label = \
		clib_net_to_host_u32 (ip0->V); \
	      t->next_index = next0; \
	    } \
	  pkts_recorded += 1; \
	NO_BUFFER: \
	  vlib_validate_buffer_enqueue_x1 (VM, N, next_index, \
					   to_next, n_left_to_next, \
					   bi0, next0); \
	} \
      vlib_put_next_frame (VM, N, next_index, n_left_to_next); \
    } \
  vlib_node_increment_counter (VM, export_node.index, \
			       EXPORT_ERROR_RECORDED, pkts_recorded); \
  clib_spinlock_unlock (&(EM)->lockp[(VM)->thread_index]); \
} while(0)
626 :
627 : #endif /* __included_ioam_export_h__ */
628 :
629 : /*
630 : * fd.io coding-style-patch-verification: ON
631 : *
632 : * Local Variables:
633 : * eval: (c-set-style "gnu")
634 : * End:
635 : */
|