/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <gtpu/gtpu.h>

extern vlib_node_registration_t gtpu4_input_node;
extern vlib_node_registration_t gtpu6_input_node;

typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
}
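
/*
 * gtpu_input is the shared worker for the gtpu4-input and gtpu6-input
 * graph nodes (is_ip4 selects the address family; the compiler
 * specializes each node via inlining). A brief sketch of the
 * per-packet flow implemented below:
 *
 *   1. current_data points at the GTPU header (udp-local dispatch).
 *   2. Look up the tunnel by (outer SIP, TEID); a one-entry key cache
 *      short-circuits the hash lookup for runs of packets on the same
 *      tunnel.
 *   3. Validate the tunnel's encap FIB and the outer DIP (unicast or
 *      multicast), pop the GTPU header, and hand the inner packet to
 *      the tunnel's configured decap next node.
 */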

always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
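
  /* A note on the key cache above (sketch; the key layouts are assumed
   * from <gtpu/gtpu.h>): gtpu4_tunnel_key_t packs the outer IPv4 source
   * and the TEID into a single u64, so "same tunnel as the previous
   * packet" is one 64-bit compare against last_key4 instead of a hash
   * lookup. The IPv6 variant keeps a full struct and uses memcmp. Both
   * caches start as all-ones, which cannot match a real key on the
   * first packet. */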

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
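
          /* Header-length arithmetic above, spelled out (assuming the
           * 12-byte gtpu_header_t from <gtpu/gtpu.h>, whose last 4 bytes
           * are the optional sequence / N-PDU / next-extension fields):
           *
           *   E|S|PN all clear: 12 - 1*4 = 8  (mandatory header only)
           *   any bit set     : 12 - 0*4 = 12 (optional word present)
           */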

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

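          /* Stats batching, in words: packets and bytes accumulate in
           * locals while consecutive packets hit the same sw_if_index;
           * the combined counter is touched only on an interface change
           * (or at end of frame). The subtract-then-flush dance above
           * re-attributes the current packet to the new interface's
           * fresh batch. */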
        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
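
      /* Single-buffer remainder loop: the same logic as the dual-buffer
       * path above, applied to whatever does not fill a pair (fewer
       * than 4 packets left, or fewer than 2 next-frame slots). */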

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          } else {
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
          }

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, is_ip4 ?
                               gtpu4_input_node.index : gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input (vm, node, from_frame, /* is_ip4 */ 0);
}

static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};
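
/* The table above is generated by an X-macro pass over
 * <gtpu/gtpu_error.def>. Each gtpu_error(n,s) entry expands to its
 * string; for example (a sketch -- the exact entries live in the .def
 * file):
 *
 *   gtpu_error (DECAPSULATED, "good packets decapsulated")
 *     -->  "good packets decapsulated",
 *
 * The GTPU_ERROR_* codes used with node->errors[] elsewhere in this
 * file come from the same .def file, so strings and codes stay in sync
 * by construction. */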

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

  //temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

  //temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;
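
/*
 * ip[46]-gtpu-bypass sits on the ip4-unicast / ip6-unicast feature arc.
 * It peeks at packets that are still on the IP forwarding path and,
 * when the packet is UDP to the GTPU port and its destination is a
 * known local VTEP, steers it straight to gtpu[46]-input, bypassing
 * the regular ip-local / udp-local dispatch. Everything else continues
 * unchanged down the feature arc. A typical setup (sketch; see the
 * gtpu plugin CLI):
 *
 *   set interface ip gtpu-bypass <interface>
 */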

always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;	/* last IPv4 address / fib index
				   matching a local VTEP address */
  vtep6_key_t last_vtep6;	/* last IPv6 address / fib index
				   matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
                                       &gtm->vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }
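
          /* Checksum policy, in brief: a zero UDP checksum is legal
           * over IPv4 (RFC 768 makes the checksum optional there), so
           * it is accepted without verification; otherwise the checksum
           * is trusted if the driver already marked it CORRECT, and is
           * recomputed in software only as a last resort. The zero
           * checksum is let through on the IPv6 side here as well,
           * matching the vxlan-bypass code this node appears to be
           * patterned on. */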

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t) + sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t) + sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector (&gtm->vtep_table, b1, ip41, &last_vtep4,
                                       &gtm->vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1; /* no local VTEP for GTPU packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t) + sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t) + sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = b[0];
          b++;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
                                       &gtm->vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t) + sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t) + sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */

VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);

#define foreach_gtpu_flow_error \
  _(NONE, "no error") \
  _(PAYLOAD_ERROR, "Payload type errors") \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
  _(IP_HEADER_ERROR, "Rx ip header errors") \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;

static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};

#define gtpu_local_need_csum_check(_b) \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED || \
     (_b->flags & VNET_BUFFER_F_OFFLOAD && \
      vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)))

#define gtpu_local_csum_is_valid(_b) \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT || \
    (_b->flags & VNET_BUFFER_F_OFFLOAD && \
     vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)) != 0)
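
/* Reading the two macros above together: a buffer needs a software
 * checksum pass only if nothing has computed the checksum yet AND the
 * hardware was not asked to do it. Conversely, the checksum is taken
 * as valid if it was verified CORRECT, or if checksum work was
 * offloaded to the NIC (in which case a bad packet would have been
 * flagged by the driver before it got here). */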

static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t) };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
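
/* The advance(-offset)/advance(offset) pair above rewinds current_data
 * from the GTPU header back to the outer IPv4 header, because
 * ip4_tcp_udp_validate_checksum() expects the buffer to start at the
 * IP header, then restores it. This helper (and the two below) assume
 * an IPv4 outer header only, which matches the ipv4/udp/gtpu flow
 * match noted later in this file. */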

static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
    sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0 ||
    ip4_hdr->ip_version_and_header_length != 0x45;
}

static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
    sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}

static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}

always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          next0 = t0->decap_next_index;
1457 0 : sw_if_index0 = t0->sw_if_index;
1458 :
1459 : /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1460 0 : vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
1461 :
1462 0 : pkts_decapsulated ++;
1463 0 : stats_n_packets += 1;
1464 0 : stats_n_bytes += len0;
1465 :
1466 : /* Batch stats increment on the same gtpu tunnel so counter
1467 : is not incremented per packet */
1468 0 : if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
1469 : {
1470 0 : stats_n_packets -= 1;
1471 0 : stats_n_bytes -= len0;
1472 0 : if (stats_n_packets)
1473 0 : vlib_increment_combined_counter
1474 : (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1475 : thread_index, stats_sw_if_index,
1476 : stats_n_packets, stats_n_bytes);
1477 0 : stats_n_packets = 1;
1478 0 : stats_n_bytes = len0;
1479 0 : stats_sw_if_index = sw_if_index0;
1480 : }
1481 :
1482 0 : trace0:
1483 0 : b0->error = error0 ? node->errors[error0] : 0;
1484 :
1485 0 : if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1486 : {
1487 : gtpu_rx_trace_t *tr
1488 0 : = vlib_add_trace (vm, node, b0, sizeof (*tr));
1489 0 : tr->next_index = next0;
1490 0 : tr->error = error0;
1491 0 : tr->tunnel_index = tunnel_index0;
1492 0 : tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
1493 : }
1494 :
1495 0 : if (ip_err1 || udp_err1 || csum_err1)
1496 : {
1497 0 : next1 = GTPU_INPUT_NEXT_DROP;
1498 0 : error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
1499 0 : goto trace1;
1500 : }
1501 :
1502 : /* speculatively load gtp header version field */
1503 0 : ver1 = gtpu1->ver_flags;
1504 :
1505 : /*
1506 : * Manipulate gtpu header
1507 : * TBD: Manipulate Sequence Number and N-PDU Number
1508 : * TBD: Manipulate Next Extension Header
1509 : */
1510 0 : gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
1511 0 : has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
1512 0 : if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
1513 : {
1514 0 : error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1515 0 : next1 = GTPU_INPUT_NEXT_DROP;
1516 0 : goto trace1;
1517 : }
1518 :
1519 : /* Manipulate packet 1 */
1520 0 : ASSERT (b1->flow_id != 0);
1521 0 : tunnel_index1 = b1->flow_id - gtm->flow_id_start;
1522 0 : t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
1523 0 : b1->flow_id = 0;
1524 :
1525 : /* Pop gtpu header */
1526 0 : vlib_buffer_advance (b1, gtpu_hdr_len1);
1527 :
1528 : /* assign the next node */
1529 0 : if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1530 0 : (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1531 : {
1532 0 : error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1533 0 : next1 = GTPU_INPUT_NEXT_DROP;
1534 0 : goto trace1;
1535 : }
1536 0 : next1 = t1->decap_next_index;
1537 :
1538 0 : sw_if_index1 = t1->sw_if_index;
1539 :
1540 : /* Required to make the l2 tag push / pop code work on l2 subifs */
1541 : /* This won't happen in current implementation as only
1542 : ipv4/udp/gtpu/IPV4 type packets can be matched */
1543 0 : if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
1544 0 : vnet_update_l2_len (b1);
1545 :
1546 : /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1547 0 : vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
1548 :
1549 0 : pkts_decapsulated ++;
1550 0 : stats_n_packets += 1;
1551 0 : stats_n_bytes += len1;
1552 :
1553 : /* Batch stats increment on the same gtpu tunnel so counter
1554 : is not incremented per packet */
1555 0 : if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
1556 : {
1557 0 : stats_n_packets -= 1;
1558 0 : stats_n_bytes -= len1;
1559 0 : if (stats_n_packets)
1560 0 : vlib_increment_combined_counter
1561 : (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1562 : thread_index, stats_sw_if_index,
1563 : stats_n_packets, stats_n_bytes);
1564 0 : stats_n_packets = 1;
1565 0 : stats_n_bytes = len1;
1566 0 : stats_sw_if_index = sw_if_index1;
1567 : }
1568 :
1569 0 : trace1:
1570 0 : b1->error = error1 ? node->errors[error1] : 0;
1571 :
1572 0 : if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
1573 : {
1574 : gtpu_rx_trace_t *tr
1575 0 : = vlib_add_trace (vm, node, b1, sizeof (*tr));
1576 0 : tr->next_index = next1;
1577 0 : tr->error = error1;
1578 0 : tr->tunnel_index = tunnel_index1;
1579 0 : tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
1580 : }
1581 :
1582 0 : vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1583 : to_next, n_left_to_next,
1584 : bi0, bi1, next0, next1);
1585 : }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input (vm, node, from_frame);
}

/* *INDENT-OFF* */
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};
#endif
/* *INDENT-ON* */

#endif /* CLIB_MARCH_VARIANT */