/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface_output.h>
#include <vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
    VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum
{
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t *t = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
	      t->tunnel_index, t->vni);
  return s;
}
#endif

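/*
 * Encap worker shared by the IPv4 and IPv6 nodes: prepend each buffer
 * with the tunnel's precomputed IP/UDP/VXLAN rewrite, patch the length,
 * QoS and checksum fields, and enqueue to the next node recorded in the
 * tunnel's next_dpo.
 */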
always_inline uword
vxlan_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		    vlib_frame_t *from_frame, u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_header_t, 36);

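  /* Underlay header size: outer IPv4 (20) or IPv6 (40) + UDP (8) + VXLAN (8)
     octets, matching the static asserts above. */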
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_header_t) : sizeof (ip6_vxlan_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
  u32 const outer_packet_csum_offload_flags =
    is_ip4 ? (VNET_BUFFER_OFFLOAD_F_OUTER_IP_CKSUM |
	      VNET_BUFFER_OFFLOAD_F_TNL_VXLAN) :
	     (VNET_BUFFER_OFFLOAD_F_OUTER_UDP_CKSUM |
	      VNET_BUFFER_OFFLOAD_F_TNL_VXLAN);

  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

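      /* Dual-packet loop: process two buffers per iteration and prefetch
         the headers and leading data of the two that follow. */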
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  /* Prefetch next iteration. */
	  {
	    vlib_prefetch_buffer_header (b[2], LOAD);
	    vlib_prefetch_buffer_header (b[3], LOAD);

	    CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
			   2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
			   2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  u32 bi0 = to_next[0] = from[0];
	  u32 bi1 = to_next[1] = from[1];
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  vlib_buffer_t *b0 = b[0];
	  vlib_buffer_t *b1 = b[1];
	  b += 2;

	  u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
	  u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

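	  /* The tunnel lookup is cached across packets: a buffer leaving
	     the same VXLAN interface as its predecessor reuses the
	     previously fetched tunnel, next index and DPO index. */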
	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      vnet_hw_interface_t *hi0 =
		vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	      dpoi_idx0 = t0->next_dpo.dpoi_index;
	    }

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
	    {
	      if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
		{
		  sw_if_index1 = sw_if_index0;
		  t1 = t0;
		  next1 = next0;
		  dpoi_idx1 = dpoi_idx0;
		}
	      else
		{
		  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
		  vnet_hw_interface_t *hi1 =
		    vnet_get_sup_hw_interface (vnm, sw_if_index1);
		  t1 = &vxm->tunnels[hi1->dev_instance];
		  /* Note: change to always set next1 if it may be set to drop */
		  next1 = t1->next_dpo.dpoi_next_node;
		  dpoi_idx1 = t1->next_dpo.dpoi_index;
		}
	    }

	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

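	  /* The rewrite writes the precomputed encap header into the space
	     ahead of the current data; advancing by -underlay_hdr_len then
	     exposes it as the new packet start. */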
	  ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
	  ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
	  vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
				    vlib_buffer_get_current (b1),
				    underlay_hdr_len);

	  vlib_buffer_advance (b0, -underlay_hdr_len);
	  vlib_buffer_advance (b1, -underlay_hdr_len);

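	  /* len* now include the underlay header; payload_l* is the UDP
	     datagram length, i.e. everything after the outer L3 header. */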
	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u32 len1 = vlib_buffer_length_in_chain (vm, b1);
	  u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
	  u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

	  void *underlay0 = vlib_buffer_get_current (b0);
	  void *underlay1 = vlib_buffer_get_current (b1);

	  ip4_header_t *ip4_0, *ip4_1;
	  qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
	  ip6_header_t *ip6_0, *ip6_1;
	  udp_header_t *udp0, *udp1;
	  u8 *l3_0, *l3_1;
	  if (is_ip4)
	    {
	      ip4_vxlan_header_t *hdr0 = underlay0;
	      ip4_vxlan_header_t *hdr1 = underlay1;

	      /* Fix the IP4 checksum and length */
	      ip4_0 = &hdr0->ip4;
	      ip4_1 = &hdr1->ip4;
	      ip4_0->length = clib_host_to_net_u16 (len0);
	      ip4_1->length = clib_host_to_net_u16 (len1);

	      if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
		{
		  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
		  ip4_0->tos = ip4_0_tos;
		}
	      if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
		{
		  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
		  ip4_1->tos = ip4_1_tos;
		}

	      l3_0 = (u8 *) ip4_0;
	      l3_1 = (u8 *) ip4_1;
	      udp0 = &hdr0->udp;
	      udp1 = &hdr1->udp;
	    }
	  else /* ipv6 */
	    {
	      ip6_vxlan_header_t *hdr0 = underlay0;
	      ip6_vxlan_header_t *hdr1 = underlay1;

	      /* Fix IP6 payload length */
	      ip6_0 = &hdr0->ip6;
	      ip6_1 = &hdr1->ip6;
	      ip6_0->payload_length = payload_l0;
	      ip6_1->payload_length = payload_l1;

	      l3_0 = (u8 *) ip6_0;
	      l3_1 = (u8 *) ip6_1;
	      udp0 = &hdr0->udp;
	      udp1 = &hdr1->udp;
	    }

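	  /* Carrying the inner flow hash in the UDP source port gives
	     underlay ECMP/LAG hashing per-flow entropy (RFC 7348). */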
	  /* Fix UDP length and set source port */
	  udp0->length = payload_l0;
	  udp0->src_port = flow_hash0;
	  udp1->length = payload_l1;
	  udp1->src_port = flow_hash1;

	  if (b0->flags & VNET_BUFFER_F_OFFLOAD)
	    {
	      vnet_buffer2 (b0)->outer_l3_hdr_offset = l3_0 - b0->data;
	      vnet_buffer2 (b0)->outer_l4_hdr_offset = (u8 *) udp0 - b0->data;
	      vnet_buffer_offload_flags_set (b0,
					     outer_packet_csum_offload_flags);
	    }
	  /* IPv4 checksum only */
	  else if (is_ip4)
	    {
	      ip_csum_t sum0 = ip4_0->checksum;
	      sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
				     length /* changed member */);
	      if (PREDICT_FALSE (ip4_0_tos))
		{
		  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
					 tos /* changed member */);
		}
	      ip4_0->checksum = ip_csum_fold (sum0);
	    }
	  /* IPv6 UDP checksum is mandatory */
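	  /* Per RFC 768, a computed UDP checksum of zero is sent as all
	     ones */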
	  else
	    {
	      int bogus = 0;

	      udp0->checksum =
		ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

	  if (b1->flags & VNET_BUFFER_F_OFFLOAD)
	    {
	      vnet_buffer2 (b1)->outer_l3_hdr_offset = l3_1 - b1->data;
	      vnet_buffer2 (b1)->outer_l4_hdr_offset = (u8 *) udp1 - b1->data;
	      vnet_buffer_offload_flags_set (b1,
					     outer_packet_csum_offload_flags);
	    }
	  /* IPv4 checksum only */
	  else if (is_ip4)
	    {
	      ip_csum_t sum1 = ip4_1->checksum;
	      sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
				     length /* changed member */);
	      if (PREDICT_FALSE (ip4_1_tos))
		{
		  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
					 tos /* changed member */);
		}
	      ip4_1->checksum = ip_csum_fold (sum1);
	    }
	  /* IPv6 UDP checksum is mandatory */
	  else
	    {
	      int bogus = 0;

	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b1, ip6_1, &bogus);
	      ASSERT (bogus == 0);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	    }

	  /* save inner packet flow_hash for load-balance node */
	  vnet_buffer (b0)->ip.flow_hash = flow_hash0;
	  vnet_buffer (b1)->ip.flow_hash = flow_hash1;

	  if (sw_if_index0 == sw_if_index1)
	    {
	      vlib_increment_combined_counter (tx_counter, thread_index,
					       sw_if_index0, 2, len0 + len1);
	    }
	  else
	    {
	      vlib_increment_combined_counter (tx_counter, thread_index,
					       sw_if_index0, 1, len0);
	      vlib_increment_combined_counter (tx_counter, thread_index,
					       sw_if_index1, 1, len1);
	    }
	  pkts_encapsulated += 2;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }

	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_encap_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->tunnel_index = t1 - vxm->tunnels;
	      tr->vni = t1->vni;
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

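      /* Single-packet loop for the remainder of the frame. */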
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = to_next[0] = from[0];
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  vlib_buffer_t *b0 = b[0];
	  b += 1;

	  u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      vnet_hw_interface_t *hi0 =
		vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	      dpoi_idx0 = t0->next_dpo.dpoi_index;
	    }
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

	  ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
	  vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
				   underlay_hdr_len);

	  vlib_buffer_advance (b0, -underlay_hdr_len);
	  void *underlay0 = vlib_buffer_get_current (b0);

	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

	  udp_header_t *udp0;
	  ip4_header_t *ip4_0;
	  qos_bits_t ip4_0_tos = 0;
	  ip6_header_t *ip6_0;
	  u8 *l3_0;
	  if (is_ip4)
	    {
	      ip4_vxlan_header_t *hdr = underlay0;

	      /* Fix the IP4 checksum and length */
	      ip4_0 = &hdr->ip4;
	      ip4_0->length = clib_host_to_net_u16 (len0);

	      if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
		{
		  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
		  ip4_0->tos = ip4_0_tos;
		}

	      l3_0 = (u8 *) ip4_0;
	      udp0 = &hdr->udp;
	    }
	  else /* ip6 path */
	    {
	      ip6_vxlan_header_t *hdr = underlay0;

	      /* Fix IP6 payload length */
	      ip6_0 = &hdr->ip6;
	      ip6_0->payload_length = payload_l0;

	      l3_0 = (u8 *) ip6_0;
	      udp0 = &hdr->udp;
	    }

	  /* Fix UDP length and set source port */
	  udp0->length = payload_l0;
	  udp0->src_port = flow_hash0;

	  if (b0->flags & VNET_BUFFER_F_OFFLOAD)
	    {
	      vnet_buffer2 (b0)->outer_l3_hdr_offset = l3_0 - b0->data;
	      vnet_buffer2 (b0)->outer_l4_hdr_offset = (u8 *) udp0 - b0->data;
	      vnet_buffer_offload_flags_set (b0,
					     outer_packet_csum_offload_flags);
	    }
	  /* IPv4 checksum only */
	  else if (is_ip4)
	    {
	      ip_csum_t sum0 = ip4_0->checksum;
	      sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
				     length /* changed member */);
	      if (PREDICT_FALSE (ip4_0_tos))
		{
		  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
					 tos /* changed member */);
		}
	      ip4_0->checksum = ip_csum_fold (sum0);
	    }
	  /* IPv6 UDP checksum is mandatory */
	  else
	    {
	      int bogus = 0;

	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b0, ip6_0, &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

	  /* reuse inner packet flow_hash for load-balance node */
	  vnet_buffer (b0)->ip.flow_hash = flow_hash0;

	  vlib_increment_combined_counter (tx_counter, thread_index,
					   sw_if_index0, 1, len0);
	  pkts_encapsulated++;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
			       VXLAN_ENCAP_ERROR_ENCAPSULATED,
			       pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_encap_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  /* Disable checksum offload, as the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (vxlan6_encap_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the udp checksum is mandatory */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

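/* The per-tunnel next index used above comes from stacking the tunnel's
   next_dpo on these nodes; VXLAN_ENCAP_NEXT_DROP is the statically
   registered fallback arc. */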
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */