Line data Source code
1 : /*
2 : * Copyright (c) 2017 SUSE LLC.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 : #include <vppinfra/error.h>
16 : #include <vppinfra/hash.h>
17 : #include <vnet/vnet.h>
18 : #include <vnet/ip/ip.h>
19 : #include <vnet/ethernet/ethernet.h>
20 : #include <geneve/geneve.h>
21 :
/* Statistics (not all errors) */
/* Error/counter table: each _(SYM, "string") entry expands into both an
   enum index (below) and a human-readable counter string. */
#define foreach_geneve_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

/* Counter strings (shown by "show errors"), generated from the table above. */
static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};
31 :
/* Error counter indices, generated from foreach_geneve_encap_error. */
typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
  GENEVE_ENCAP_N_ERROR,		/* number of error counters */
} geneve_encap_error_t;
39 :
/* Next-node indices for the encap nodes.  Only "drop" is registered
   statically; the forwarding next is taken from the tunnel's DPO at
   run time (see next0/next1 in geneve_encap_inline). */
typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,		/* number of static next nodes */
} geneve_encap_next_t;
45 :
/* u64 word offsets used to bulk-copy the fixed rewrite prefix.
   IPv4 case: 4 x 8 = 32 octets copied here; the remaining 4 octets of the
   20B IPv4 + 8B UDP + 8B GENEVE base header are copied separately. */
#define foreach_fixed_header4_offset \
_(0) _(1) _(2) _(3)

/* IPv6 case: 7 x 8 = 56 octets = 40B IPv6 + 8B UDP + 8B GENEVE base. */
#define foreach_fixed_header6_offset \
_(0) _(1) _(2) _(3) _(4) _(5) _(6)
51 :
52 : always_inline uword
53 5 : geneve_encap_inline (vlib_main_t * vm,
54 : vlib_node_runtime_t * node,
55 : vlib_frame_t * from_frame, u32 is_ip4)
56 : {
57 : u32 n_left_from, next_index, *from, *to_next;
58 5 : geneve_main_t *vxm = &geneve_main;
59 5 : vnet_main_t *vnm = vxm->vnet_main;
60 5 : vnet_interface_main_t *im = &vnm->interface_main;
61 5 : u32 pkts_encapsulated = 0;
62 5 : u16 old_l0 = 0, old_l1 = 0;
63 5 : u32 thread_index = vm->thread_index;
64 : u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
65 5 : u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
66 5 : u32 next0 = 0, next1 = 0;
67 : vnet_hw_interface_t *hi0, *hi1;
68 5 : geneve_tunnel_t *t0 = NULL, *t1 = NULL;
69 5 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
70 :
71 5 : from = vlib_frame_vector_args (from_frame);
72 5 : n_left_from = from_frame->n_vectors;
73 5 : vlib_get_buffers (vm, from, bufs, n_left_from);
74 :
75 5 : next_index = node->cached_next_index;
76 5 : stats_sw_if_index = node->runtime_data[0];
77 5 : stats_n_packets = stats_n_bytes = 0;
78 :
79 10 : while (n_left_from > 0)
80 : {
81 : u32 n_left_to_next;
82 :
83 5 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
84 :
85 9 : while (n_left_from >= 4 && n_left_to_next >= 2)
86 : {
87 : u32 bi0, bi1;
88 : u32 flow_hash0, flow_hash1;
89 : u32 len0, len1;
90 : ip4_header_t *ip4_0, *ip4_1;
91 : ip6_header_t *ip6_0, *ip6_1;
92 : udp_header_t *udp0, *udp1;
93 : u64 *copy_src0, *copy_dst0;
94 : u64 *copy_src1, *copy_dst1;
95 : u32 *copy_src_last0, *copy_dst_last0;
96 : u32 *copy_src_last1, *copy_dst_last1;
97 : u16 new_l0, new_l1;
98 : ip_csum_t sum0, sum1;
99 :
100 : /* Prefetch next iteration. */
101 : {
102 4 : vlib_prefetch_buffer_header (b[2], LOAD);
103 4 : vlib_prefetch_buffer_header (b[3], LOAD);
104 :
105 4 : CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
106 : 2 * CLIB_CACHE_LINE_BYTES, LOAD);
107 4 : CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
108 : 2 * CLIB_CACHE_LINE_BYTES, LOAD);
109 : }
110 :
111 4 : bi0 = from[0];
112 4 : bi1 = from[1];
113 4 : to_next[0] = bi0;
114 4 : to_next[1] = bi1;
115 4 : from += 2;
116 4 : to_next += 2;
117 4 : n_left_to_next -= 2;
118 4 : n_left_from -= 2;
119 :
120 4 : flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
121 4 : flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
122 :
123 :
124 : /* Get next node index and adj index from tunnel next_dpo */
125 4 : if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
126 : {
127 4 : sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
128 4 : hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
129 4 : t0 = &vxm->tunnels[hi0->dev_instance];
130 : /* Note: change to always set next0 if it may be set to drop */
131 4 : next0 = t0->next_dpo.dpoi_next_node;
132 : }
133 :
134 4 : ALWAYS_ASSERT (t0 != NULL);
135 :
136 4 : vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
137 :
138 : /* Get next node index and adj index from tunnel next_dpo */
139 4 : if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
140 : {
141 4 : sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
142 4 : hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
143 4 : t1 = &vxm->tunnels[hi1->dev_instance];
144 : /* Note: change to always set next1 if it may be set to drop */
145 4 : next1 = t1->next_dpo.dpoi_next_node;
146 : }
147 :
148 4 : ALWAYS_ASSERT (t1 != NULL);
149 :
150 4 : vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
151 :
152 : /* Apply the rewrite string. $$$$ vnet_rewrite? */
153 4 : vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));
154 4 : vlib_buffer_advance (b[1], -(word) _vec_len (t1->rewrite));
155 :
156 4 : if (is_ip4)
157 : {
158 4 : u8 ip4_geneve_base_header_len =
159 : sizeof (ip4_header_t) + sizeof (udp_header_t) +
160 : GENEVE_BASE_HEADER_LENGTH;
161 4 : u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
162 4 : u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
163 : #if SUPPORT_OPTIONS_HEADER==1
164 : ip4_geneve_header_total_len0 += t0->options_len;
165 : ip4_geneve_header_total_len1 += t1->options_len;
166 : #endif
167 4 : ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
168 4 : ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);
169 :
170 4 : ip4_0 = vlib_buffer_get_current (b[0]);
171 4 : ip4_1 = vlib_buffer_get_current (b[1]);
172 :
173 : /* Copy the fixed header */
174 4 : copy_dst0 = (u64 *) ip4_0;
175 4 : copy_src0 = (u64 *) t0->rewrite;
176 4 : copy_dst1 = (u64 *) ip4_1;
177 4 : copy_src1 = (u64 *) t1->rewrite;
178 : /* Copy first 32 octets 8-bytes at a time */
179 : #define _(offs) copy_dst0[offs] = copy_src0[offs];
180 4 : foreach_fixed_header4_offset;
181 : #undef _
182 : #define _(offs) copy_dst1[offs] = copy_src1[offs];
183 4 : foreach_fixed_header4_offset;
184 : #undef _
185 : /* Last 4 octets. Hopefully gcc will be our friend */
186 4 : copy_dst_last0 = (u32 *) (©_dst0[4]);
187 4 : copy_src_last0 = (u32 *) (©_src0[4]);
188 4 : copy_dst_last0[0] = copy_src_last0[0];
189 4 : copy_dst_last1 = (u32 *) (©_dst1[4]);
190 4 : copy_src_last1 = (u32 *) (©_src1[4]);
191 4 : copy_dst_last1[0] = copy_src_last1[0];
192 :
193 : /* Fix the IP4 checksum and length */
194 4 : sum0 = ip4_0->checksum;
195 : new_l0 = /* old_l0 always 0, see the rewrite setup */
196 4 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
197 4 : sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
198 : length /* changed member */ );
199 4 : ip4_0->checksum = ip_csum_fold (sum0);
200 4 : ip4_0->length = new_l0;
201 4 : sum1 = ip4_1->checksum;
202 : new_l1 = /* old_l1 always 0, see the rewrite setup */
203 4 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]));
204 4 : sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
205 : length /* changed member */ );
206 4 : ip4_1->checksum = ip_csum_fold (sum1);
207 4 : ip4_1->length = new_l1;
208 :
209 : /* Fix UDP length and set source port */
210 4 : udp0 = (udp_header_t *) (ip4_0 + 1);
211 : new_l0 =
212 4 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
213 : sizeof (*ip4_0));
214 4 : udp0->length = new_l0;
215 4 : udp0->src_port = flow_hash0;
216 4 : udp1 = (udp_header_t *) (ip4_1 + 1);
217 : new_l1 =
218 4 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]) -
219 : sizeof (*ip4_1));
220 4 : udp1->length = new_l1;
221 4 : udp1->src_port = flow_hash1;
222 : }
223 : else /* ipv6 */
224 : {
225 0 : int bogus = 0;
226 :
227 0 : u8 ip6_geneve_base_header_len =
228 : sizeof (ip6_header_t) + sizeof (udp_header_t) +
229 : GENEVE_BASE_HEADER_LENGTH;
230 0 : u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
231 0 : u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
232 : #if SUPPORT_OPTIONS_HEADER==1
233 : ip6_geneve_header_total_len0 += t0->options_len;
234 : ip6_geneve_header_total_len1 += t1->options_len;
235 : #endif
236 0 : ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
237 0 : ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);
238 :
239 0 : ip6_0 = vlib_buffer_get_current (b[0]);
240 0 : ip6_1 = vlib_buffer_get_current (b[1]);
241 :
242 : /* Copy the fixed header */
243 0 : copy_dst0 = (u64 *) ip6_0;
244 0 : copy_src0 = (u64 *) t0->rewrite;
245 0 : copy_dst1 = (u64 *) ip6_1;
246 0 : copy_src1 = (u64 *) t1->rewrite;
247 : /* Copy first 56 (ip6) octets 8-bytes at a time */
248 : #define _(offs) copy_dst0[offs] = copy_src0[offs];
249 0 : foreach_fixed_header6_offset;
250 : #undef _
251 : #define _(offs) copy_dst1[offs] = copy_src1[offs];
252 0 : foreach_fixed_header6_offset;
253 : #undef _
254 : /* Fix IP6 payload length */
255 : new_l0 =
256 0 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
257 0 : - sizeof (*ip6_0));
258 0 : ip6_0->payload_length = new_l0;
259 : new_l1 =
260 0 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1])
261 0 : - sizeof (*ip6_1));
262 0 : ip6_1->payload_length = new_l1;
263 :
264 : /* Fix UDP length and set source port */
265 0 : udp0 = (udp_header_t *) (ip6_0 + 1);
266 0 : udp0->length = new_l0;
267 0 : udp0->src_port = flow_hash0;
268 0 : udp1 = (udp_header_t *) (ip6_1 + 1);
269 0 : udp1->length = new_l1;
270 0 : udp1->src_port = flow_hash1;
271 :
272 : /* IPv6 UDP checksum is mandatory */
273 0 : udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
274 : ip6_0,
275 : &bogus);
276 0 : ASSERT (bogus == 0);
277 0 : if (udp0->checksum == 0)
278 0 : udp0->checksum = 0xffff;
279 0 : udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[1],
280 : ip6_1,
281 : &bogus);
282 0 : ASSERT (bogus == 0);
283 0 : if (udp1->checksum == 0)
284 0 : udp1->checksum = 0xffff;
285 : }
286 :
287 4 : pkts_encapsulated += 2;
288 4 : len0 = vlib_buffer_length_in_chain (vm, b[0]);
289 4 : len1 = vlib_buffer_length_in_chain (vm, b[1]);
290 4 : stats_n_packets += 2;
291 4 : stats_n_bytes += len0 + len1;
292 :
293 : /* save inner packet flow_hash for load-balance node */
294 4 : vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
295 4 : vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
296 :
297 : /* Batch stats increment on the same geneve tunnel so counter is not
298 : incremented per packet. Note stats are still incremented for deleted
299 : and admin-down tunnel where packets are dropped. It is not worthwhile
300 : to check for this rare case and affect normal path performance. */
301 4 : if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
302 : (sw_if_index1 != stats_sw_if_index)))
303 : {
304 4 : stats_n_packets -= 2;
305 4 : stats_n_bytes -= len0 + len1;
306 4 : if (sw_if_index0 == sw_if_index1)
307 : {
308 0 : if (stats_n_packets)
309 0 : vlib_increment_combined_counter
310 0 : (im->combined_sw_if_counters +
311 : VNET_INTERFACE_COUNTER_TX, thread_index,
312 : stats_sw_if_index, stats_n_packets, stats_n_bytes);
313 0 : stats_sw_if_index = sw_if_index0;
314 0 : stats_n_packets = 2;
315 0 : stats_n_bytes = len0 + len1;
316 : }
317 : else
318 : {
319 4 : vlib_increment_combined_counter
320 4 : (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
321 : thread_index, sw_if_index0, 1, len0);
322 4 : vlib_increment_combined_counter
323 4 : (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
324 : thread_index, sw_if_index1, 1, len1);
325 : }
326 : }
327 :
328 4 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
329 : {
330 : geneve_encap_trace_t *tr =
331 4 : vlib_add_trace (vm, node, b[0], sizeof (*tr));
332 4 : tr->tunnel_index = t0 - vxm->tunnels;
333 4 : tr->vni = t0->vni;
334 : }
335 :
336 4 : if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
337 : {
338 : geneve_encap_trace_t *tr =
339 4 : vlib_add_trace (vm, node, b[1], sizeof (*tr));
340 4 : tr->tunnel_index = t1 - vxm->tunnels;
341 4 : tr->vni = t1->vni;
342 : }
343 4 : b += 2;
344 :
345 4 : vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
346 : to_next, n_left_to_next,
347 : bi0, bi1, next0, next1);
348 : }
349 :
350 11 : while (n_left_from > 0 && n_left_to_next > 0)
351 : {
352 : u32 bi0;
353 : u32 flow_hash0;
354 : u32 len0;
355 : ip4_header_t *ip4_0;
356 : ip6_header_t *ip6_0;
357 : udp_header_t *udp0;
358 : u64 *copy_src0, *copy_dst0;
359 : u32 *copy_src_last0, *copy_dst_last0;
360 : u16 new_l0;
361 : ip_csum_t sum0;
362 :
363 6 : bi0 = from[0];
364 6 : to_next[0] = bi0;
365 6 : from += 1;
366 6 : to_next += 1;
367 6 : n_left_from -= 1;
368 6 : n_left_to_next -= 1;
369 :
370 6 : flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
371 :
372 : /* Get next node index and adj index from tunnel next_dpo */
373 6 : if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
374 : {
375 6 : sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
376 6 : hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
377 6 : t0 = &vxm->tunnels[hi0->dev_instance];
378 : /* Note: change to always set next0 if it may be set to drop */
379 6 : next0 = t0->next_dpo.dpoi_next_node;
380 : }
381 :
382 6 : ALWAYS_ASSERT (t0 != NULL);
383 :
384 6 : vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
385 :
386 : /* Apply the rewrite string. $$$$ vnet_rewrite? */
387 6 : vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));
388 :
389 6 : if (is_ip4)
390 : {
391 6 : u8 ip4_geneve_base_header_len =
392 : sizeof (ip4_header_t) + sizeof (udp_header_t) +
393 : GENEVE_BASE_HEADER_LENGTH;
394 6 : u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
395 : #if SUPPORT_OPTIONS_HEADER==1
396 : ip4_geneve_header_total_len0 += t0->options_len;
397 : #endif
398 6 : ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
399 :
400 6 : ip4_0 = vlib_buffer_get_current (b[0]);
401 :
402 : /* Copy the fixed header */
403 6 : copy_dst0 = (u64 *) ip4_0;
404 6 : copy_src0 = (u64 *) t0->rewrite;
405 : /* Copy first 32 octets 8-bytes at a time */
406 : #define _(offs) copy_dst0[offs] = copy_src0[offs];
407 6 : foreach_fixed_header4_offset;
408 : #undef _
409 : /* Last 4 octets. Hopefully gcc will be our friend */
410 6 : copy_dst_last0 = (u32 *) (©_dst0[4]);
411 6 : copy_src_last0 = (u32 *) (©_src0[4]);
412 6 : copy_dst_last0[0] = copy_src_last0[0];
413 :
414 : /* Fix the IP4 checksum and length */
415 6 : sum0 = ip4_0->checksum;
416 : new_l0 = /* old_l0 always 0, see the rewrite setup */
417 6 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
418 6 : sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
419 : length /* changed member */ );
420 6 : ip4_0->checksum = ip_csum_fold (sum0);
421 6 : ip4_0->length = new_l0;
422 :
423 : /* Fix UDP length and set source port */
424 6 : udp0 = (udp_header_t *) (ip4_0 + 1);
425 : new_l0 =
426 6 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
427 : sizeof (*ip4_0));
428 6 : udp0->length = new_l0;
429 6 : udp0->src_port = flow_hash0;
430 : }
431 :
432 : else /* ip6 path */
433 : {
434 0 : int bogus = 0;
435 :
436 0 : u8 ip6_geneve_base_header_len =
437 : sizeof (ip6_header_t) + sizeof (udp_header_t) +
438 : GENEVE_BASE_HEADER_LENGTH;
439 0 : u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
440 : #if SUPPORT_OPTIONS_HEADER==1
441 : ip6_geneve_header_total_len0 += t0->options_len;
442 : #endif
443 0 : ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
444 :
445 0 : ip6_0 = vlib_buffer_get_current (b[0]);
446 : /* Copy the fixed header */
447 0 : copy_dst0 = (u64 *) ip6_0;
448 0 : copy_src0 = (u64 *) t0->rewrite;
449 : /* Copy first 56 (ip6) octets 8-bytes at a time */
450 : #define _(offs) copy_dst0[offs] = copy_src0[offs];
451 0 : foreach_fixed_header6_offset;
452 : #undef _
453 : /* Fix IP6 payload length */
454 : new_l0 =
455 0 : clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
456 0 : - sizeof (*ip6_0));
457 0 : ip6_0->payload_length = new_l0;
458 :
459 : /* Fix UDP length and set source port */
460 0 : udp0 = (udp_header_t *) (ip6_0 + 1);
461 0 : udp0->length = new_l0;
462 0 : udp0->src_port = flow_hash0;
463 :
464 : /* IPv6 UDP checksum is mandatory */
465 0 : udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
466 : ip6_0,
467 : &bogus);
468 0 : ASSERT (bogus == 0);
469 0 : if (udp0->checksum == 0)
470 0 : udp0->checksum = 0xffff;
471 : }
472 :
473 6 : pkts_encapsulated++;
474 6 : len0 = vlib_buffer_length_in_chain (vm, b[0]);
475 6 : stats_n_packets += 1;
476 6 : stats_n_bytes += len0;
477 :
478 : /* save inner packet flow_hash for load-balance node */
479 6 : vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
480 :
481 : /* Batch stats increment on the same geneve tunnel so counter is not
482 : incremented per packet. Note stats are still incremented for deleted
483 : and admin-down tunnel where packets are dropped. It is not worthwhile
484 : to check for this rare case and affect normal path performance. */
485 6 : if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
486 : {
487 5 : stats_n_packets -= 1;
488 5 : stats_n_bytes -= len0;
489 5 : if (stats_n_packets)
490 1 : vlib_increment_combined_counter
491 1 : (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
492 : thread_index, stats_sw_if_index,
493 : stats_n_packets, stats_n_bytes);
494 5 : stats_n_packets = 1;
495 5 : stats_n_bytes = len0;
496 5 : stats_sw_if_index = sw_if_index0;
497 : }
498 :
499 6 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
500 : {
501 : geneve_encap_trace_t *tr =
502 5 : vlib_add_trace (vm, node, b[0], sizeof (*tr));
503 5 : tr->tunnel_index = t0 - vxm->tunnels;
504 5 : tr->vni = t0->vni;
505 : }
506 6 : b += 1;
507 :
508 6 : vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
509 : to_next, n_left_to_next,
510 : bi0, next0);
511 : }
512 :
513 5 : vlib_put_next_frame (vm, node, next_index, n_left_to_next);
514 : }
515 :
516 : /* Do we still need this now that tunnel tx stats is kept? */
517 5 : vlib_node_increment_counter (vm, node->node_index,
518 : GENEVE_ENCAP_ERROR_ENCAPSULATED,
519 : pkts_encapsulated);
520 :
521 : /* Increment any remaining batch stats */
522 5 : if (stats_n_packets)
523 : {
524 5 : vlib_increment_combined_counter
525 5 : (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
526 : thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
527 5 : node->runtime_data[0] = stats_sw_if_index;
528 : }
529 :
530 5 : return from_frame->n_vectors;
531 : }
532 :
/* geneve4-encap node function: GENEVE encapsulation over an IPv4 underlay;
 * delegates to the shared inline worker with is_ip4 = 1. */
VLIB_NODE_FN (geneve4_encap_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}
539 :
/* geneve6-encap node function: GENEVE encapsulation over an IPv6 underlay;
 * delegates to the shared inline worker with is_ip4 = 0. */
VLIB_NODE_FN (geneve6_encap_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}
546 :
547 : /* *INDENT-OFF* */
/* Graph-node registration for the IPv4 encap path.  Only the drop next is
 * static; the forwarding next index comes from the tunnel DPO at run time. */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
560 :
/* Graph-node registration for the IPv6 encap path; mirrors geneve4-encap. */
VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
573 : /* *INDENT-ON* */
574 :
575 : /*
576 : * fd.io coding-style-patch-verification: ON
577 : *
578 : * Local Variables:
579 : * eval: (c-set-style "gnu")
580 : * End:
581 : */
|