Line data Source code
1 : /*
2 : * esp_decrypt.c : IPSec ESP decrypt node
3 : *
4 : * Copyright (c) 2015 Cisco and/or its affiliates.
5 : * Licensed under the Apache License, Version 2.0 (the "License");
6 : * you may not use this file except in compliance with the License.
7 : * You may obtain a copy of the License at:
8 : *
9 : * http://www.apache.org/licenses/LICENSE-2.0
10 : *
11 : * Unless required by applicable law or agreed to in writing, software
12 : * distributed under the License is distributed on an "AS IS" BASIS,
13 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 : * See the License for the specific language governing permissions and
15 : * limitations under the License.
16 : */
17 : #include <vnet/vnet.h>
18 : #include <vnet/api_errno.h>
19 : #include <vnet/ip/ip.h>
20 : #include <vnet/l2/l2_input.h>
21 :
22 : #include <vnet/ipsec/ipsec.h>
23 : #include <vnet/ipsec/esp.h>
24 : #include <vnet/ipsec/ipsec_io.h>
25 : #include <vnet/ipsec/ipsec_tun.h>
26 :
27 : #include <vnet/gre/packet.h>
28 :
29 : #define foreach_esp_decrypt_next \
30 : _ (DROP, "error-drop") \
31 : _ (IP4_INPUT, "ip4-input-no-checksum") \
32 : _ (IP6_INPUT, "ip6-input") \
33 : _ (L2_INPUT, "l2-input") \
34 : _ (MPLS_INPUT, "mpls-input") \
35 : _ (HANDOFF, "handoff")
36 :
37 : #define _(v, s) ESP_DECRYPT_NEXT_##v,
38 : typedef enum
39 : {
40 : foreach_esp_decrypt_next
41 : #undef _
42 : ESP_DECRYPT_N_NEXT,
43 : } esp_decrypt_next_t;
44 :
45 : #define foreach_esp_decrypt_post_next \
46 : _ (DROP, "error-drop") \
47 : _ (IP4_INPUT, "ip4-input-no-checksum") \
48 : _ (IP6_INPUT, "ip6-input") \
49 : _ (MPLS_INPUT, "mpls-input") \
50 : _ (L2_INPUT, "l2-input")
51 :
52 : #define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
53 : typedef enum
54 : {
55 : foreach_esp_decrypt_post_next
56 : #undef _
57 : ESP_DECRYPT_POST_N_NEXT,
58 : } esp_decrypt_post_next_t;
59 :
60 : typedef struct
61 : {
62 : u32 seq;
63 : u32 sa_seq;
64 : u32 sa_seq_hi;
65 : u32 pkt_seq_hi;
66 : ipsec_crypto_alg_t crypto_alg;
67 : ipsec_integ_alg_t integ_alg;
68 : } esp_decrypt_trace_t;
69 :
70 : typedef vl_counter_esp_decrypt_enum_t esp_decrypt_error_t;
71 :
72 : /* The number of bytes in the high sequence number */
73 : #define N_HI_ESN_BYTES 4
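/* Background sketch (based on RFC 4303, not taken from this file): with
 * extended sequence numbers the full sequence number is 64 bits wide but
 * only the low 32 bits travel in the ESP header. For ICV verification the
 * receiver appends the high-order 32 bits (these 4 bytes) after the
 * ciphertext before running the integrity algorithm; esp_insert_esn() and
 * esp_decrypt_chain_integ() below arrange exactly that splice. */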
74 :
75 : /* packet trace format function */
76 : static u8 *
77 165068 : format_esp_decrypt_trace (u8 * s, va_list * args)
78 : {
79 165068 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
80 165068 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
81 165068 : esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
82 :
83 165068 : s = format (s,
84 : "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
85 : "pkt-seq-hi %u",
86 165068 : format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
87 165068 : t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);
88 165068 : return s;
89 : }
90 :
91 : #define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
92 :
93 : static_always_inline void
94 9102 : esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
95 : vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
96 : int e)
97 : {
98 9102 : vnet_crypto_op_t *op = ops;
99 9102 : u32 n_fail, n_ops = vec_len (ops);
100 :
101 9102 : if (n_ops == 0)
102 2618 : return;
103 :
104 6484 : n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
105 :
106 8432 : while (n_fail)
107 : {
108 1948 : ASSERT (op - ops < n_ops);
109 1948 : if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
110 : {
111 1948 : u32 err, bi = op->user_data;
112 1948 : if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
113 1948 : err = e;
114 : else
115 0 : err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
116 1948 : esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
117 : nexts, ESP_DECRYPT_NEXT_DROP,
118 1948 : vnet_buffer (b[bi])->ipsec.sad_index);
119 1948 : n_fail--;
120 : }
121 1948 : op++;
122 : }
123 : }
124 :
125 : static_always_inline void
126 9102 : esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
127 : vnet_crypto_op_t * ops, vlib_buffer_t * b[],
128 : u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
129 : {
130 :
131 9102 : vnet_crypto_op_t *op = ops;
132 9102 : u32 n_fail, n_ops = vec_len (ops);
133 :
134 9102 : if (PREDICT_TRUE (n_ops == 0))
135 8030 : return;
136 :
137 1072 : n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
138 :
139 1072 : while (n_fail)
140 : {
141 0 : ASSERT (op - ops < n_ops);
142 0 : if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
143 : {
144 0 : u32 err, bi = op->user_data;
145 0 : if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
146 0 : err = e;
147 : else
148 0 : err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
149 0 : esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
150 : nexts, ESP_DECRYPT_NEXT_DROP,
151 0 : vnet_buffer (b[bi])->ipsec.sad_index);
152 0 : n_fail--;
153 : }
154 0 : op++;
155 : }
156 : }
157 :
158 : always_inline void
159 213843 : esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
160 : u16 tail)
161 : {
162 213843 : vlib_buffer_t *before_last = b;
163 :
164 213843 : if (last->current_length > tail)
165 : {
166 205803 : last->current_length -= tail;
167 205803 : return;
168 : }
169 8040 : ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
170 :
171 18760 : while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
172 : {
173 10720 : before_last = b;
174 10720 : b = vlib_get_buffer (vm, b->next_buffer);
175 : }
176 8040 : before_last->current_length -= tail - last->current_length;
177 8040 : vlib_buffer_free_one (vm, before_last->next_buffer);
178 8040 : before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
179 : }
180 :
181 : /* ICV is split across the last two buffers, so move it to the last buffer
182 : and return a pointer to it */
183 : static_always_inline u8 *
184 12060 : esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
185 : esp_decrypt_packet_data_t * pd,
186 : esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
187 : {
188 : vlib_buffer_t *before_last, *bp;
189 12060 : u16 last_sz = pd2->lb->current_length;
190 12060 : u16 first_sz = icv_sz - last_sz;
191 :
192 12060 : bp = before_last = first;
193 34304 : while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
194 : {
195 22244 : before_last = bp;
196 22244 : bp = vlib_get_buffer (vm, bp->next_buffer);
197 : }
198 :
199 12060 : u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
200 12060 : memmove (lb_curr + first_sz, lb_curr, last_sz);
201 12060 : clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
202 : first_sz);
203 12060 : before_last->current_length -= first_sz;
204 12060 : if (before_last == first)
205 1876 : pd->current_length -= first_sz;
206 12060 : clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
207 12060 : if (dif)
208 12060 : dif[0] = first_sz;
209 12060 : pd2->lb = before_last;
210 12060 : pd2->icv_removed = 1;
211 12060 : pd2->free_buffer_index = before_last->next_buffer;
212 12060 : before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
213 12060 : return lb_curr;
214 : }
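/* Illustrative layout for esp_move_icv() above (sketch, not part of the
 * source):
 *
 *   before:  ... | before_last: payload + icv[0..first_sz) | last: icv[first_sz..icv_sz)
 *   after:   ... | before_last: payload                      (last unlinked, full ICV)
 *
 * the partial ICV bytes are copied out of before_last's tail (and zeroed
 * there), the original last buffer ends up holding the whole ICV
 * contiguously and is unlinked from the chain (its index parked in
 * pd2->free_buffer_index for later freeing), and pd2->lb is repointed at
 * before_last. */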
215 :
216 : static_always_inline u16
217 110571 : esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
218 : esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
219 : u16 *len, vlib_buffer_t *b, u8 *payload)
220 : {
221 110571 : if (!ipsec_sa_is_set_USE_ESN (sa))
222 65115 : return 0;
223 : /* shift ICV by 4 bytes to insert ESN */
224 45456 : u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
225 : u8 tmp[ESP_MAX_ICV_SIZE];
226 :
227 45456 : if (pd2->icv_removed)
228 : {
229 0 : u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
230 0 : if (space_left >= N_HI_ESN_BYTES)
231 : {
232 0 : clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
233 : N_HI_ESN_BYTES);
234 0 : *data_len += N_HI_ESN_BYTES;
235 : }
236 : else
237 0 : return N_HI_ESN_BYTES;
238 :
239 0 : len[0] = b->current_length;
240 : }
241 : else
242 : {
243 45456 : clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
244 45456 : clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
245 45456 : clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
246 : ESP_MAX_ICV_SIZE);
247 45456 : *data_len += N_HI_ESN_BYTES;
248 45456 : *digest += N_HI_ESN_BYTES;
249 : }
250 45456 : return N_HI_ESN_BYTES;
251 : }
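/* Tail layout produced by esp_insert_esn() in the common branch above
 * (ICV not yet moved), shown schematically:
 *
 *   before:  ... payload | ICV
 *   after:   ... payload | seq_hi (N_HI_ESN_BYTES) | ICV
 *
 * i.e. the high 32 bits of the ESN are spliced in between the payload and
 * the ICV so the integrity check covers them, and *data_len and *digest
 * are both advanced by N_HI_ESN_BYTES. */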
252 :
253 : static_always_inline u8 *
254 7236 : esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
255 : esp_decrypt_packet_data_t * pd,
256 : esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
257 : ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
258 : {
259 7236 : u16 dif = 0;
260 7236 : u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
261 7236 : if (dif)
262 7236 : *len -= dif;
263 :
264 7236 : if (ipsec_sa_is_set_USE_ESN (sa))
265 : {
266 3618 : u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
267 3618 : u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
268 :
269 3618 : if (space_left >= N_HI_ESN_BYTES)
270 : {
271 3216 : clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
272 : N_HI_ESN_BYTES);
273 3216 : *len += N_HI_ESN_BYTES;
274 : }
275 : else
276 : {
277 : /* no space for ESN at the tail, use the next buffer
278 : * (with ICV data) */
279 402 : ASSERT (pd2->icv_removed);
280 402 : vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
281 402 : clib_memcpy_fast (vlib_buffer_get_current (tmp) - N_HI_ESN_BYTES,
282 : &seq_hi, N_HI_ESN_BYTES);
283 402 : extra_esn[0] = 1;
284 : }
285 : }
286 7236 : return digest;
287 : }
288 :
289 : static_always_inline int
290 39664 : esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
291 : const esp_decrypt_packet_data_t *pd,
292 : esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
293 : vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
294 : u32 start_len, u8 **digest, u16 *n_ch,
295 : u32 *integ_total_len)
296 : {
297 : vnet_crypto_op_chunk_t *ch;
298 39664 : vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
299 39664 : u16 n_chunks = 1;
300 : u32 total_len;
301 39664 : vec_add2 (ptd->chunks, ch, 1);
302 39664 : total_len = ch->len = start_len;
303 39664 : ch->src = start_src;
304 :
305 : while (1)
306 : {
307 52528 : vec_add2 (ptd->chunks, ch, 1);
308 52528 : n_chunks += 1;
309 52528 : ch->src = vlib_buffer_get_current (cb);
310 52528 : if (pd2->lb == cb)
311 : {
312 39664 : if (pd2->icv_removed)
313 5360 : ch->len = cb->current_length;
314 : else
315 34304 : ch->len = cb->current_length - icv_sz;
316 39664 : if (ipsec_sa_is_set_USE_ESN (sa0))
317 : {
318 19832 : u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
319 : u8 tmp[ESP_MAX_ICV_SIZE];
320 : u8 *esn;
321 : vlib_buffer_t *tmp_b;
322 19832 : u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
323 19832 : if (space_left < N_HI_ESN_BYTES)
324 : {
325 1206 : if (pd2->icv_removed)
326 : {
327 : /* use pre-data area from the last buffer
328 : that was removed from the chain */
329 402 : tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
330 402 : esn = tmp_b->data - N_HI_ESN_BYTES;
331 : }
332 : else
333 : {
334 : /* no space, need to allocate new buffer */
335 804 : u32 tmp_bi = 0;
336 804 : if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
337 0 : return -1;
338 804 : tmp_b = vlib_get_buffer (vm, tmp_bi);
339 804 : esn = tmp_b->data;
340 804 : pd2->free_buffer_index = tmp_bi;
341 : }
342 1206 : clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);
343 :
344 1206 : vec_add2 (ptd->chunks, ch, 1);
345 1206 : n_chunks += 1;
346 1206 : ch->src = esn;
347 1206 : ch->len = N_HI_ESN_BYTES;
348 : }
349 : else
350 : {
351 18626 : if (pd2->icv_removed)
352 : {
353 2278 : clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
354 : &seq_hi, N_HI_ESN_BYTES);
355 : }
356 : else
357 : {
358 16348 : clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
359 16348 : clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
360 16348 : clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
361 : ESP_MAX_ICV_SIZE);
362 16348 : *digest += N_HI_ESN_BYTES;
363 : }
364 18626 : ch->len += N_HI_ESN_BYTES;
365 : }
366 : }
367 39664 : total_len += ch->len;
368 39664 : break;
369 : }
370 : else
371 12864 : total_len += ch->len = cb->current_length;
372 :
373 12864 : if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
374 0 : break;
375 :
376 12864 : cb = vlib_get_buffer (vm, cb->next_buffer);
377 : }
378 :
379 39664 : if (n_ch)
380 31624 : *n_ch = n_chunks;
381 39664 : if (integ_total_len)
382 8040 : *integ_total_len = total_len;
383 :
384 39664 : return 0;
385 : }
386 :
387 : static_always_inline u32
388 60300 : esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
389 : esp_decrypt_packet_data_t * pd,
390 : esp_decrypt_packet_data2_t * pd2,
391 : ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
392 : u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
393 : {
394 : vnet_crypto_op_chunk_t *ch;
395 60300 : vlib_buffer_t *cb = b;
396 60300 : u16 n_chunks = 1;
397 : u32 total_len;
398 60300 : vec_add2 (ptd->chunks, ch, 1);
399 60300 : total_len = ch->len = start_len;
400 60300 : ch->src = ch->dst = start;
401 60300 : cb = vlib_get_buffer (vm, cb->next_buffer);
402 60300 : n_chunks = 1;
403 :
404 : while (1)
405 : {
406 83884 : vec_add2 (ptd->chunks, ch, 1);
407 83884 : n_chunks += 1;
408 83884 : ch->src = ch->dst = vlib_buffer_get_current (cb);
409 83884 : if (pd2->lb == cb)
410 : {
411 60300 : if (ipsec_sa_is_set_IS_AEAD (sa0))
412 : {
413 24120 : if (pd2->lb->current_length < icv_sz)
414 : {
415 4824 : u16 dif = 0;
416 4824 : *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif);
417 :
418 : /* this chunk does not contain crypto data */
419 4824 : n_chunks -= 1;
420 : /* and fix previous chunk's length as it might have
421 : been changed */
422 4824 : ASSERT (n_chunks > 0);
423 4824 : if (pd2->lb == b)
424 : {
425 0 : total_len -= dif;
426 0 : ch[-1].len -= dif;
427 : }
428 : else
429 : {
430 4824 : total_len = total_len + pd2->lb->current_length -
431 4824 : ch[-1].len;
432 4824 : ch[-1].len = pd2->lb->current_length;
433 : }
434 4824 : break;
435 : }
436 : else
437 19296 : *tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
438 : }
439 :
440 55476 : if (pd2->icv_removed)
441 4824 : total_len += ch->len = cb->current_length;
442 : else
443 50652 : total_len += ch->len = cb->current_length - icv_sz;
444 : }
445 : else
446 23584 : total_len += ch->len = cb->current_length;
447 :
448 79060 : if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
449 55476 : break;
450 :
451 23584 : cb = vlib_get_buffer (vm, cb->next_buffer);
452 : }
453 :
454 60300 : if (n_ch)
455 40200 : *n_ch = n_chunks;
456 :
457 60300 : return total_len;
458 : }
459 :
460 : static_always_inline void
461 181072 : esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
462 : ipsec_per_thread_data_t * ptd,
463 : vnet_crypto_op_t *** crypto_ops,
464 : vnet_crypto_op_t *** integ_ops,
465 : vnet_crypto_op_t * op,
466 : ipsec_sa_t * sa0, u8 * payload,
467 : u16 len, u8 icv_sz, u8 iv_sz,
468 : esp_decrypt_packet_data_t * pd,
469 : esp_decrypt_packet_data2_t * pd2,
470 : vlib_buffer_t * b, u16 * next, u32 index)
471 : {
472 181072 : const u8 esp_sz = sizeof (esp_header_t);
473 :
474 181072 : if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
475 : {
476 136389 : vnet_crypto_op_init (op, sa0->integ_op_id);
477 136389 : op->key_index = sa0->integ_key_index;
478 136389 : op->src = payload;
479 136389 : op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
480 136389 : op->user_data = index;
481 136389 : op->digest = payload + len;
482 136389 : op->digest_len = icv_sz;
483 136389 : op->len = len;
484 :
485 136389 : if (pd->is_chain)
486 : {
487 : /* buffer is chained */
488 32964 : op->len = pd->current_length;
489 :
490 : /* special case when ICV is split and needs to be reassembled
491 : * first -> move it to the last buffer. Also take into account
492 : * that ESN needs to be added after encrypted data and may or
493 : * may not fit in the tail.*/
494 32964 : if (pd2->lb->current_length < icv_sz)
495 : {
496 5628 : u8 extra_esn = 0;
497 5628 : op->digest =
498 5628 : esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
499 : &extra_esn, &op->len);
500 :
501 5628 : if (extra_esn)
502 : {
503 : /* esn is in the last buffer, that was unlinked from
504 : * the chain */
505 402 : op->len = b->current_length;
506 : }
507 : else
508 : {
509 5226 : if (pd2->lb == b)
510 : {
511 : /* we now have a single buffer of crypto data, adjust
512 : * the length (second buffer contains only ICV) */
513 1340 : *integ_ops = &ptd->integ_ops;
514 1340 : *crypto_ops = &ptd->crypto_ops;
515 1340 : len = b->current_length;
516 1340 : goto out;
517 : }
518 : }
519 : }
520 : else
521 27336 : op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;
522 :
523 31624 : op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
524 31624 : op->chunk_index = vec_len (ptd->chunks);
525 31624 : if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
526 31624 : payload, pd->current_length,
527 : &op->digest, &op->n_chunks, 0) < 0)
528 : {
529 0 : esp_decrypt_set_next_index (
530 : b, node, vm->thread_index, ESP_DECRYPT_ERROR_NO_BUFFERS, 0,
531 : next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
532 0 : return;
533 : }
534 : }
535 : else
536 103425 : esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
537 : payload);
538 136389 : out:
539 136389 : vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
540 : }
541 :
542 181072 : payload += esp_sz;
543 181072 : len -= esp_sz;
544 :
545 181072 : if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
546 : {
547 166093 : vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
548 166093 : op->key_index = sa0->crypto_key_index;
549 166093 : op->iv = payload;
550 :
551 166093 : if (ipsec_sa_is_set_IS_CTR (sa0))
552 : {
553 : /* construct nonce in a scratch space in front of the IP header */
554 87969 : esp_ctr_nonce_t *nonce =
555 87969 : (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
556 : sizeof (*nonce));
557 87969 : if (ipsec_sa_is_set_IS_AEAD (sa0))
558 : {
559 : /* construct aad in a scratch space in front of the nonce */
560 44556 : esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
561 44556 : op->aad = (u8 *) nonce - sizeof (esp_aead_t);
562 44556 : op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
563 44556 : op->tag = payload + len;
564 44556 : op->tag_len = 16;
565 : }
566 : else
567 : {
568 43413 : nonce->ctr = clib_host_to_net_u32 (1);
569 : }
570 87969 : nonce->salt = sa0->salt;
571 87969 : ASSERT (sizeof (u64) == iv_sz);
572 87969 : nonce->iv = *(u64 *) op->iv;
573 87969 : op->iv = (u8 *) nonce;
574 : }
575 166093 : op->src = op->dst = payload += iv_sz;
576 166093 : op->len = len - iv_sz;
577 166093 : op->user_data = index;
578 :
579 166093 : if (pd->is_chain && (pd2->lb != b))
580 : {
581 : /* buffer is chained */
582 40200 : op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
583 40200 : op->chunk_index = vec_len (ptd->chunks);
584 40200 : esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
585 40200 : payload, len - pd->iv_sz + pd->icv_sz,
586 : &op->tag, &op->n_chunks);
587 : }
588 :
589 166093 : vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
590 : }
591 : }
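/* Scratch-space layout assumed by the CTR/AEAD branch above (sketch only;
 * the field order inside esp_ctr_nonce_t is not shown):
 *
 *   ... [ aad ][ nonce ][ IP/UDP hdr ][ ESP hdr ][ IV ][ ciphertext ] ...
 *
 * the nonce is rebuilt from the SA salt plus the 8-byte IV carried in the
 * packet (plus, for plain CTR, a block counter of 1); for AEAD the AAD
 * (SPI and (extended) sequence number) is assembled by esp_aad_fill()
 * immediately in front of the nonce, all in the scratch space ahead of
 * the IP header. */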
592 :
593 : static_always_inline esp_decrypt_error_t
594 39305 : esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
595 : ipsec_per_thread_data_t *ptd,
596 : vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
597 : u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
598 : esp_decrypt_packet_data_t *pd,
599 : esp_decrypt_packet_data2_t *pd2, u32 bi,
600 : vlib_buffer_t *b, u16 *next, u16 async_next)
601 : {
602 39305 : const u8 esp_sz = sizeof (esp_header_t);
603 39305 : esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
604 39305 : esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
605 39305 : u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
606 39305 : const u32 key_index = sa0->crypto_key_index;
607 39305 : u32 crypto_len, integ_len = 0;
608 39305 : i16 crypto_start_offset, integ_start_offset = 0;
609 39305 : u8 flags = 0;
610 :
611 39305 : if (!ipsec_sa_is_set_IS_AEAD (sa0))
612 : {
613 : /* linked algs */
614 15722 : integ_start_offset = payload - b->data;
615 15722 : integ_len = len;
616 15722 : if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
617 15722 : flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
618 :
619 15722 : if (pd->is_chain)
620 : {
621 : /* buffer is chained */
622 8576 : integ_len = pd->current_length;
623 :
624 : /* special case when ICV is splitted and needs to be reassembled
625 : * first -> move it to the last buffer. Also take into account
626 : * that ESN needs to be added after encrypted data and may or
627 : * may not fit in the tail.*/
628 8576 : if (pd2->lb->current_length < icv_sz)
629 : {
630 1608 : u8 extra_esn = 0;
631 1608 : tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
632 : &extra_esn, &integ_len);
633 :
634 1608 : if (extra_esn)
635 : {
636 : /* esn is in the last buffer, that was unlinked from
637 : * the chain */
638 0 : integ_len = b->current_length;
639 : }
640 : else
641 : {
642 1608 : if (pd2->lb == b)
643 : {
644 : /* we now have a single buffer of crypto data, adjust
645 : * the length (second buffer contains only ICV) */
646 536 : len = b->current_length;
647 536 : goto out;
648 : }
649 : }
650 : }
651 : else
652 6968 : tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
653 :
654 8040 : flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
655 8040 : if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
656 8040 : payload, pd->current_length, &tag, 0,
657 : &integ_len) < 0)
658 : {
659 : /* allocate buffer failed, will not add to frame and drop */
660 0 : return (ESP_DECRYPT_ERROR_NO_BUFFERS);
661 : }
662 : }
663 : else
664 7146 : esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
665 : }
666 :
667 23583 : out:
668 : /* crypto */
669 39305 : payload += esp_sz;
670 39305 : len -= esp_sz;
671 39305 : iv = payload;
672 :
673 39305 : if (ipsec_sa_is_set_IS_CTR (sa0))
674 : {
675 : /* construct nonce in a scratch space in front of the IP header */
676 23583 : esp_ctr_nonce_t *nonce =
677 23583 : (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
678 23583 : if (ipsec_sa_is_set_IS_AEAD (sa0))
679 : {
680 : /* construct aad in a scratch space in front of the nonce */
681 23583 : esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
682 23583 : aad = (u8 *) nonce - sizeof (esp_aead_t);
683 23583 : esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
684 23583 : tag = payload + len;
685 : }
686 : else
687 : {
688 0 : nonce->ctr = clib_host_to_net_u32 (1);
689 : }
690 23583 : nonce->salt = sa0->salt;
691 23583 : ASSERT (sizeof (u64) == iv_sz);
692 23583 : nonce->iv = *(u64 *) iv;
693 23583 : iv = (u8 *) nonce;
694 : }
695 :
696 39305 : crypto_start_offset = (payload += iv_sz) - b->data;
697 39305 : crypto_len = len - iv_sz;
698 :
699 39305 : if (pd->is_chain && (pd2->lb != b))
700 : {
701 : /* buffer is chained */
702 20100 : flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
703 :
704 20100 : crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
705 : payload,
706 20100 : len - pd->iv_sz + pd->icv_sz,
707 : &tag, 0);
708 : }
709 :
710 39305 : *async_pd = *pd;
711 39305 : *async_pd2 = *pd2;
712 :
713 : /* for AEAD integ_len - crypto_len will be negative, it is ok since it
714 : * is ignored by the engine. */
715 39305 : vnet_crypto_async_add_to_frame (
716 39305 : vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
717 : integ_start_offset, bi, async_next, iv, tag, aad, flags);
718 :
719 39305 : return (ESP_DECRYPT_ERROR_RX_PKTS);
720 : }
721 :
722 : static_always_inline void
723 218139 : esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
724 : const u16 *next_by_next_header,
725 : const esp_decrypt_packet_data_t *pd,
726 : const esp_decrypt_packet_data2_t *pd2,
727 : vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
728 : int is_async)
729 : {
730 218139 : ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
731 218139 : vlib_buffer_t *lb = b;
732 218139 : const u8 esp_sz = sizeof (esp_header_t);
733 218139 : const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
734 218139 : u8 pad_length = 0, next_header = 0;
735 : u16 icv_sz;
736 :
737 : /*
738 : * redo the anti-replay check
739 : * in this frame say we have sequence numbers, s, s+1, s+1, s+1
740 : * and s and s+1 are in the window. When we did the anti-replay
741 : * check above we did so against the state of the window (W),
742 : * after packet s-1. So each of the packets in the sequence will be
743 : * accepted.
744 : * This time s will be checked against Ws-1, s+1 checked against Ws
745 : * (i.e. the window state is updated/advanced)
746 : * so this time the successive s+1 packets will be dropped.
747 : * This is a consequence of batching the decrypts. If the
748 : * check-decrypt-advance process was done for each packet it would
749 : * be fine. But we batch the decrypts because it's much more efficient
750 : * to do so in SW and if we offload to HW and the process is async.
751 : *
752 : * You're probably thinking, but this means an attacker can send the
753 : * above sequence and cause VPP to perform decrypts that will fail,
754 : * and that's true. But if the attacker can determine s (a valid
755 : * sequence number in the window) which is non-trivial, it can generate
756 : * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
757 : * implementation, sequential or batching, from decrypting these.
758 : */
759 218139 : if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
760 : NULL))
761 : {
762 622 : esp_decrypt_set_next_index (b, node, vm->thread_index,
763 : ESP_DECRYPT_ERROR_REPLAY, 0, next,
764 : ESP_DECRYPT_NEXT_DROP, pd->sa_index);
765 622 : return;
766 : }
767 :
768 : u64 n_lost =
769 217517 : ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
770 :
771 217517 : vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
772 : vm->thread_index, pd->sa_index);
773 :
774 217517 : if (pd->is_chain)
775 : {
776 65660 : lb = pd2->lb;
777 65660 : icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
778 65660 : if (pd2->free_buffer_index)
779 : {
780 12864 : vlib_buffer_free_one (vm, pd2->free_buffer_index);
781 12864 : lb->next_buffer = 0;
782 : }
783 65660 : if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
784 : {
785 : /* esp footer is either split across two buffers or sits entirely in
786 : * the before-last buffer */
787 :
788 0 : vlib_buffer_t *before_last = b, *bp = b;
789 0 : while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
790 : {
791 0 : before_last = bp;
792 0 : bp = vlib_get_buffer (vm, bp->next_buffer);
793 : }
794 0 : u8 *bt = vlib_buffer_get_tail (before_last);
795 :
796 0 : if (lb->current_length == icv_sz)
797 : {
798 0 : esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
799 0 : pad_length = f->pad_length;
800 0 : next_header = f->next_header;
801 : }
802 : else
803 : {
804 0 : pad_length = (bt - 1)[0];
805 0 : next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
806 : }
807 : }
808 : else
809 : {
810 65660 : esp_footer_t *f =
811 65660 : (esp_footer_t *) (lb->data + lb->current_data +
812 65660 : lb->current_length - sizeof (esp_footer_t) -
813 : icv_sz);
814 65660 : pad_length = f->pad_length;
815 65660 : next_header = f->next_header;
816 : }
817 : }
818 : else
819 : {
820 151857 : icv_sz = pd->icv_sz;
821 151857 : esp_footer_t *f =
822 151857 : (esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
823 151857 : sizeof (esp_footer_t) - icv_sz);
824 151857 : pad_length = f->pad_length;
825 151857 : next_header = f->next_header;
826 : }
827 :
828 217517 : u16 adv = pd->iv_sz + esp_sz;
829 217517 : u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
830 217517 : u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
831 217517 : b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
832 :
833 217517 : if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
834 142663 : {
835 142663 : u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
836 : sizeof (udp_header_t) : 0;
837 142663 : u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
838 142663 : u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
839 142663 : u8 *ip = old_ip + adv + udp_sz;
840 :
841 142663 : if (is_ip6 && ip_hdr_sz > 64)
842 0 : memmove (ip, old_ip, ip_hdr_sz);
843 : else
844 142663 : clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);
845 :
846 142663 : b->current_data = pd->current_data + adv - ip_hdr_sz;
847 142663 : b->current_length += ip_hdr_sz - adv;
848 142663 : esp_remove_tail (vm, b, lb, tail);
849 :
850 142663 : if (is_ip6)
851 : {
852 27662 : ip6_header_t *ip6 = (ip6_header_t *) ip;
853 27662 : u16 len = clib_net_to_host_u16 (ip6->payload_length);
854 27662 : len -= adv + tail_orig;
855 27662 : ip6->payload_length = clib_host_to_net_u16 (len);
856 27662 : ip6->protocol = next_header;
857 27662 : next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
858 : }
859 : else
860 : {
861 115001 : ip4_header_t *ip4 = (ip4_header_t *) ip;
862 115001 : ip_csum_t sum = ip4->checksum;
863 115001 : u16 len = clib_net_to_host_u16 (ip4->length);
864 115001 : len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
865 115001 : sum = ip_csum_update (sum, ip4->protocol, next_header,
866 : ip4_header_t, protocol);
867 115001 : sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
868 115001 : ip4->checksum = ip_csum_fold (sum);
869 115001 : ip4->protocol = next_header;
870 115001 : ip4->length = len;
871 115001 : next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
872 : }
873 : }
874 : else
875 : {
876 74854 : if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
877 : {
878 37983 : next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
879 37983 : b->current_data = pd->current_data + adv;
880 37983 : b->current_length = pd->current_length - adv;
881 37983 : esp_remove_tail (vm, b, lb, tail);
882 : }
883 36871 : else if (next_header == IP_PROTOCOL_IPV6)
884 : {
885 32943 : next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
886 32943 : b->current_data = pd->current_data + adv;
887 32943 : b->current_length = pd->current_length - adv;
888 32943 : esp_remove_tail (vm, b, lb, tail);
889 : }
890 3928 : else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
891 : {
892 254 : next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
893 254 : b->current_data = pd->current_data + adv;
894 254 : b->current_length = pd->current_length - adv;
895 254 : esp_remove_tail (vm, b, lb, tail);
896 : }
897 3674 : else if (is_tun && next_header == IP_PROTOCOL_GRE)
898 3674 : {
899 : gre_header_t *gre;
900 :
901 3674 : b->current_data = pd->current_data + adv;
902 3674 : b->current_length = pd->current_length - adv - tail;
903 :
904 3674 : gre = vlib_buffer_get_current (b);
905 :
906 3674 : vlib_buffer_advance (b, sizeof (*gre));
907 :
908 3674 : switch (clib_net_to_host_u16 (gre->protocol))
909 : {
910 260 : case GRE_PROTOCOL_teb:
911 260 : vnet_update_l2_len (b);
912 260 : next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
913 260 : break;
914 2146 : case GRE_PROTOCOL_ip4:
915 2146 : next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
916 2146 : break;
917 1267 : case GRE_PROTOCOL_ip6:
918 1267 : next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
919 1267 : break;
920 1 : default:
921 1 : esp_decrypt_set_next_index (
922 : b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
923 : next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
924 1 : break;
925 : }
926 : }
927 0 : else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
928 : (u16) ~0)
929 : {
930 0 : b->current_data = pd->current_data + adv;
931 0 : b->current_length = pd->current_length - adv;
932 0 : esp_remove_tail (vm, b, lb, tail);
933 : }
934 : else
935 : {
936 0 : esp_decrypt_set_next_index (b, node, vm->thread_index,
937 : ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
938 : ESP_DECRYPT_NEXT_DROP, pd->sa_index);
939 0 : return;
940 : }
941 :
942 74854 : if (is_tun)
943 : {
944 18765 : if (ipsec_sa_is_set_IS_PROTECT (sa0))
945 : {
946 : /*
947 : * There are two encap possibilities
948 : * 1) the tunnel and the SA are providing encap, i.e. it's
949 : * MAC | SA-IP | TUN-IP | ESP | PAYLOAD
950 : * implying the SA is in tunnel mode (on a tunnel interface)
951 : * 2) only the tunnel provides encap
952 : * MAC | TUN-IP | ESP | PAYLOAD
953 : * implying the SA is in transport mode.
954 : *
955 : * For 2) we need only strip the tunnel encap and we're good,
956 : * since the tunnel and crypto encap (in the tun-protect
957 : * object) are the same and we verified above that these match.
958 : * For 1) we need to strip the SA-IP outer headers, to
959 : * reveal the tunnel IP and then check that this matches
960 : * the configured tunnel.
961 : */
962 : const ipsec_tun_protect_t *itp;
963 :
964 : itp =
965 1024 : ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);
966 :
967 1024 : if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
968 : {
969 : const ip4_header_t *ip4;
970 :
971 707 : ip4 = vlib_buffer_get_current (b);
972 :
973 707 : if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
974 644 : &ip4->dst_address) ||
975 644 : !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
976 : &ip4->src_address))
977 : {
978 63 : esp_decrypt_set_next_index (
979 : b, node, vm->thread_index,
980 : ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
981 : ESP_DECRYPT_NEXT_DROP, pd->sa_index);
982 : }
983 : }
984 317 : else if (next_header == IP_PROTOCOL_IPV6)
985 : {
986 : const ip6_header_t *ip6;
987 :
988 317 : ip6 = vlib_buffer_get_current (b);
989 :
990 317 : if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
991 254 : &ip6->dst_address) ||
992 254 : !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
993 : &ip6->src_address))
994 : {
995 63 : esp_decrypt_set_next_index (
996 : b, node, vm->thread_index,
997 : ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
998 : ESP_DECRYPT_NEXT_DROP, pd->sa_index);
999 : }
1000 : }
1001 : }
1002 : }
1003 : }
1004 :
1005 217517 : if (PREDICT_FALSE (n_lost))
1006 24109 : vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
1007 : vm->thread_index, pd->sa_index, n_lost);
1008 : }
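/* Transport-mode rewrite performed by esp_decrypt_post_crypto(), shown
 * schematically (sizes illustrative):
 *
 *   before:  [ IP hdr ][ UDP encap, optional ][ ESP hdr ][ IV ][ payload ][ pad ][ pad_len ][ next_hdr ][ ICV ]
 *   after:                                    [ IP hdr ][ payload ]
 *
 * the IP header is copied forward over the ESP header and IV
 * (adv = sizeof(esp_header_t) + iv_sz), any UDP-encap header is dropped,
 * length/payload_length and the IPv4 checksum are patched for the removed
 * bytes, and esp_remove_tail() strips the ESP trailer and ICV. */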
1009 :
1010 : always_inline uword
1011 5738 : esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
1012 : vlib_frame_t *from_frame, int is_ip6, int is_tun,
1013 : u16 async_next_node)
1014 : {
1015 5738 : ipsec_main_t *im = &ipsec_main;
1016 5738 : const u16 *next_by_next_header = im->next_header_registrations;
1017 5738 : u32 thread_index = vm->thread_index;
1018 : u16 len;
1019 5738 : ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
1020 5738 : u32 *from = vlib_frame_vector_args (from_frame);
1021 5738 : u32 n_left = from_frame->n_vectors;
1022 5738 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1023 : vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
1024 5738 : u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
1025 5738 : u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
1026 5738 : u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
1027 : u32 sync_bi[VLIB_FRAME_SIZE];
1028 : u32 noop_bi[VLIB_FRAME_SIZE];
1029 5738 : esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
1030 5738 : esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
1031 5738 : esp_decrypt_packet_data_t cpd = { };
1032 5738 : u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
1033 5738 : const u8 esp_sz = sizeof (esp_header_t);
1034 5738 : ipsec_sa_t *sa0 = 0;
1035 5738 : vnet_crypto_op_t _op, *op = &_op;
1036 : vnet_crypto_op_t **crypto_ops;
1037 : vnet_crypto_op_t **integ_ops;
1038 5738 : int is_async = im->async_mode;
1039 5738 : vnet_crypto_async_op_id_t async_op = ~0;
1040 : vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
1041 : esp_decrypt_error_t err;
1042 :
1043 5738 : vlib_get_buffers (vm, from, b, n_left);
1044 5738 : if (!is_async)
1045 : {
1046 4833 : vec_reset_length (ptd->crypto_ops);
1047 4833 : vec_reset_length (ptd->integ_ops);
1048 4833 : vec_reset_length (ptd->chained_crypto_ops);
1049 4833 : vec_reset_length (ptd->chained_integ_ops);
1050 : }
1051 5738 : vec_reset_length (ptd->async_frames);
1052 5738 : vec_reset_length (ptd->chunks);
1053 5738 : clib_memset (sync_nexts, -1, sizeof (sync_nexts));
1054 5738 : clib_memset (async_frames, 0, sizeof (async_frames));
1055 :
1056 231219 : while (n_left > 0)
1057 : {
1058 : u8 *payload;
1059 :
1060 225481 : err = ESP_DECRYPT_ERROR_RX_PKTS;
1061 225481 : if (n_left > 2)
1062 : {
1063 : u8 *p;
1064 215455 : vlib_prefetch_buffer_header (b[2], LOAD);
1065 215455 : p = vlib_buffer_get_current (b[1]);
1066 215455 : clib_prefetch_load (p);
1067 215455 : p -= CLIB_CACHE_LINE_BYTES;
1068 215455 : clib_prefetch_load (p);
1069 : }
1070 :
1071 225481 : u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
1072 225481 : if (n_bufs == 0)
1073 : {
1074 0 : err = ESP_DECRYPT_ERROR_NO_BUFFERS;
1075 0 : esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
1076 : noop_nexts, ESP_DECRYPT_NEXT_DROP,
1077 0 : vnet_buffer (b[0])->ipsec.sad_index);
1078 0 : goto next;
1079 : }
1080 :
1081 225481 : if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
1082 : {
1083 5747 : if (current_sa_pkts)
1084 9 : vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
1085 : current_sa_index, current_sa_pkts,
1086 : current_sa_bytes);
1087 5747 : current_sa_bytes = current_sa_pkts = 0;
1088 :
1089 5747 : current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
1090 5747 : vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
1091 : current_sa_index);
1092 5747 : sa0 = ipsec_sa_get (current_sa_index);
1093 :
1094 : /* fetch the second cacheline ASAP */
1095 5747 : clib_prefetch_load (sa0->cacheline1);
1096 5747 : cpd.icv_sz = sa0->integ_icv_size;
1097 5747 : cpd.iv_sz = sa0->crypto_iv_size;
1098 5747 : cpd.flags = sa0->flags;
1099 5747 : cpd.sa_index = current_sa_index;
1100 5747 : is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
1101 : }
1102 :
1103 225481 : if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
1104 : {
1105 : /* this is the first packet to use this SA, claim the SA
1106 : * for this thread. this could happen simultaneously on
1107 : * another thread */
1108 6 : clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
1109 : ipsec_sa_assign_thread (thread_index));
1110 : }
1111 :
1112 225481 : if (PREDICT_FALSE (thread_index != sa0->thread_index))
1113 : {
1114 240 : vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
1115 240 : err = ESP_DECRYPT_ERROR_HANDOFF;
1116 240 : esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
1117 : noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
1118 : current_sa_index);
1119 240 : goto next;
1120 : }
1121 :
1122 : /* store packet data for next round for easier prefetch */
1123 225241 : pd->sa_data = cpd.sa_data;
1124 225241 : pd->current_data = b[0]->current_data;
1125 225241 : pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
1126 225241 : payload = b[0]->data + pd->current_data;
1127 225241 : pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
1128 225241 : pd->is_chain = 0;
1129 225241 : pd2->lb = b[0];
1130 225241 : pd2->free_buffer_index = 0;
1131 225241 : pd2->icv_removed = 0;
1132 :
1133 225241 : if (n_bufs > 1)
1134 : {
1135 65660 : pd->is_chain = 1;
1136 : /* find last buffer in the chain */
1137 160800 : while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
1138 95140 : pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);
1139 :
1140 65660 : crypto_ops = &ptd->chained_crypto_ops;
1141 65660 : integ_ops = &ptd->chained_integ_ops;
1142 : }
1143 : else
1144 : {
1145 159581 : crypto_ops = &ptd->crypto_ops;
1146 159581 : integ_ops = &ptd->integ_ops;
1147 : }
1148 :
1149 225241 : pd->current_length = b[0]->current_length;
1150 :
1151 : /* anti-replay check */
1152 225241 : if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
1153 : &pd->seq_hi))
1154 : {
1155 3640 : err = ESP_DECRYPT_ERROR_REPLAY;
1156 3640 : esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
1157 : noop_nexts, ESP_DECRYPT_NEXT_DROP,
1158 : current_sa_index);
1159 3640 : goto next;
1160 : }
1161 :
1162 221601 : if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
1163 : {
1164 1224 : err = ESP_DECRYPT_ERROR_RUNT;
1165 1224 : esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
1166 : noop_nexts, ESP_DECRYPT_NEXT_DROP,
1167 : current_sa_index);
1168 1224 : goto next;
1169 : }
1170 :
1171 220377 : len = pd->current_length - cpd.icv_sz;
1172 220377 : current_sa_pkts += 1;
1173 220377 : current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
1174 :
1175 220377 : if (is_async)
1176 : {
1177 39305 : async_op = sa0->crypto_async_dec_op_id;
1178 :
1179 : /* get a frame for this op if we don't yet have one or it's full
1180 : */
1181 77745 : if (NULL == async_frames[async_op] ||
1182 38440 : vnet_crypto_async_frame_is_full (async_frames[async_op]))
1183 : {
1184 1385 : async_frames[async_op] =
1185 1385 : vnet_crypto_async_get_frame (vm, async_op);
1186 1385 : if (PREDICT_FALSE (!async_frames[async_op]))
1187 : {
1188 0 : err = ESP_DECRYPT_ERROR_NO_AVAIL_FRAME;
1189 0 : esp_decrypt_set_next_index (
1190 : b[0], node, thread_index, err, n_noop, noop_nexts,
1191 : ESP_DECRYPT_NEXT_DROP, current_sa_index);
1192 0 : goto next;
1193 : }
1194 :
1195 : /* Save the frame to the list we'll submit at the end */
1196 1385 : vec_add1 (ptd->async_frames, async_frames[async_op]);
1197 : }
1198 :
1199 39305 : err = esp_decrypt_prepare_async_frame (
1200 : vm, node, ptd, async_frames[async_op], sa0, payload, len,
1201 39305 : cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
1202 : async_next_node);
1203 39305 : if (ESP_DECRYPT_ERROR_RX_PKTS != err)
1204 : {
1205 0 : esp_decrypt_set_next_index (
1206 : b[0], node, thread_index, err, n_noop, noop_nexts,
1207 : ESP_DECRYPT_NEXT_DROP, current_sa_index);
1208 : }
1209 : }
1210 : else
1211 181072 : esp_decrypt_prepare_sync_op (
1212 : vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
1213 181072 : cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, n_sync);
1214 : /* next */
1215 225481 : next:
1216 225481 : if (ESP_DECRYPT_ERROR_RX_PKTS != err)
1217 : {
1218 5104 : noop_bi[n_noop] = from[b - bufs];
1219 5104 : n_noop++;
1220 : }
1221 220377 : else if (!is_async)
1222 : {
1223 181072 : sync_bi[n_sync] = from[b - bufs];
1224 181072 : sync_bufs[n_sync] = b[0];
1225 181072 : n_sync++;
1226 181072 : sync_next++;
1227 181072 : pd += 1;
1228 181072 : pd2 += 1;
1229 : }
1230 : else
1231 39305 : async_next++;
1232 :
1233 225481 : n_left -= 1;
1234 225481 : b += 1;
1235 : }
1236 :
1237 5738 : if (PREDICT_TRUE (~0 != current_sa_index))
1238 5738 : vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
1239 : current_sa_index, current_sa_pkts,
1240 : current_sa_bytes);
1241 :
1242 : /* submit or free all of the open frames */
1243 : vnet_crypto_async_frame_t **async_frame;
1244 :
1245 7123 : vec_foreach (async_frame, ptd->async_frames)
1246 : {
1247 : /* free frame and move on if no ops were successfully added */
1248 1385 : if (PREDICT_FALSE (!(*async_frame)->n_elts))
1249 : {
1250 0 : vnet_crypto_async_free_frame (vm, *async_frame);
1251 0 : continue;
1252 : }
1253 1385 : if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
1254 : {
1255 0 : n_noop += esp_async_recycle_failed_submit (
1256 : vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
1257 : IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
1258 : ESP_DECRYPT_NEXT_DROP, false);
1259 0 : vnet_crypto_async_reset_frame (*async_frame);
1260 0 : vnet_crypto_async_free_frame (vm, *async_frame);
1261 : }
1262 : }
1263 :
1264 5738 : if (n_sync)
1265 : {
1266 4551 : esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
1267 : ESP_DECRYPT_ERROR_INTEG_ERROR);
1268 4551 : esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
1269 : sync_nexts, ptd->chunks,
1270 : ESP_DECRYPT_ERROR_INTEG_ERROR);
1271 :
1272 4551 : esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
1273 : ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
1274 4551 : esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
1275 : sync_nexts, ptd->chunks,
1276 : ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
1277 : }
1278 :
1279 : /* Post decryption round - adjust packet data start and length and next
1280 : node */
1281 :
1282 5738 : n_left = n_sync;
1283 5738 : sync_next = sync_nexts;
1284 5738 : pd = pkt_data;
1285 5738 : pd2 = pkt_data2;
1286 5738 : b = sync_bufs;
1287 :
1288 186810 : while (n_left)
1289 : {
1290 181072 : if (n_left >= 2)
1291 : {
1292 176521 : void *data = b[1]->data + pd[1].current_data;
1293 :
1294 : /* buffer metadata */
1295 176521 : vlib_prefetch_buffer_header (b[1], LOAD);
1296 :
1297 : /* esp_footer_t */
1298 176521 : CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
1299 : CLIB_CACHE_LINE_BYTES, LOAD);
1300 :
1301 : /* packet headers */
1302 176521 : CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
1303 : CLIB_CACHE_LINE_BYTES * 2, LOAD);
1304 : }
1305 :
1306 : /* save the sa_index, as post_crypto overwrites the L2 opaque for GRE teb */
1307 181072 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1308 181072 : current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
1309 :
1310 181072 : if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
1311 179124 : esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
1312 : sync_next, is_ip6, is_tun, 0);
1313 :
1314 : /* trace: */
1315 181072 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1316 : {
1317 : esp_decrypt_trace_t *tr;
1318 181072 : tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
1319 181072 : sa0 = ipsec_sa_get (current_sa_index);
1320 181072 : tr->crypto_alg = sa0->crypto_alg;
1321 181072 : tr->integ_alg = sa0->integ_alg;
1322 181072 : tr->seq = pd->seq;
1323 181072 : tr->sa_seq = sa0->seq;
1324 181072 : tr->sa_seq_hi = sa0->seq_hi;
1325 181072 : tr->pkt_seq_hi = pd->seq_hi;
1326 : }
1327 :
1328 : /* next */
1329 181072 : n_left -= 1;
1330 181072 : sync_next += 1;
1331 181072 : pd += 1;
1332 181072 : pd2 += 1;
1333 181072 : b += 1;
1334 : }
1335 :
1336 5738 : vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
1337 5738 : from_frame->n_vectors);
1338 :
1339 5738 : if (n_sync)
1340 4551 : vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
1341 :
1342 5738 : if (n_noop)
1343 398 : vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
1344 :
1345 5738 : return (from_frame->n_vectors);
1346 : }
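/* The three dispositions used by esp_decrypt_inline() above, for
 * orientation (descriptive only):
 *  - noop_bi / noop_nexts:  packets that never reach crypto (e.g. no
 *    buffers, handoff to the SA's owning thread, replay, runt); enqueued
 *    directly to their next node.
 *  - sync_bi / sync_bufs:   packets whose crypto/integrity ops run inline
 *    via vnet_crypto_process_ops()/vnet_crypto_process_chained_ops();
 *    post-crypto fix-up happens in this node.
 *  - ptd->async_frames:     packets handed to an async crypto engine;
 *    post-crypto fix-up happens later in esp_decrypt_post_inline(). */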
1347 :
1348 : always_inline uword
1349 1340 : esp_decrypt_post_inline (vlib_main_t * vm,
1350 : vlib_node_runtime_t * node,
1351 : vlib_frame_t * from_frame, int is_ip6, int is_tun)
1352 : {
1353 1340 : const ipsec_main_t *im = &ipsec_main;
1354 1340 : const u16 *next_by_next_header = im->next_header_registrations;
1355 1340 : u32 *from = vlib_frame_vector_args (from_frame);
1356 1340 : u32 n_left = from_frame->n_vectors;
1357 1340 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1358 1340 : u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
1359 1340 : vlib_get_buffers (vm, from, b, n_left);
1360 :
1361 40355 : while (n_left > 0)
1362 : {
1363 39015 : esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;
1364 :
1365 39015 : if (n_left > 2)
1366 : {
1367 36490 : vlib_prefetch_buffer_header (b[2], LOAD);
1368 36490 : vlib_prefetch_buffer_header (b[1], LOAD);
1369 : }
1370 :
1371 39015 : if (!pd->is_chain)
1372 18379 : esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
1373 : next, is_ip6, is_tun, 1);
1374 : else
1375 : {
1376 20636 : esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
1377 20636 : esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
1378 : b[0], next, is_ip6, is_tun, 1);
1379 : }
1380 :
1381 : /*trace: */
1382 39015 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1383 : {
1384 39015 : ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
1385 : esp_decrypt_trace_t *tr;
1386 39015 : esp_decrypt_packet_data_t *async_pd =
1387 39015 : &(esp_post_data (b[0]))->decrypt_data;
1388 39015 : tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
1389 39015 : sa0 = ipsec_sa_get (async_pd->sa_index);
1390 :
1391 39015 : tr->crypto_alg = sa0->crypto_alg;
1392 39015 : tr->integ_alg = sa0->integ_alg;
1393 39015 : tr->seq = pd->seq;
1394 39015 : tr->sa_seq = sa0->seq;
1395 39015 : tr->sa_seq_hi = sa0->seq_hi;
1396 : }
1397 :
1398 39015 : n_left--;
1399 39015 : next++;
1400 39015 : b++;
1401 : }
1402 :
1403 1340 : n_left = from_frame->n_vectors;
1404 1340 : vlib_node_increment_counter (vm, node->node_index,
1405 : ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);
1406 :
1407 1340 : vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
1408 :
1409 1340 : return n_left;
1410 : }
1411 :
1412 6860 : VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
1413 : vlib_node_runtime_t * node,
1414 : vlib_frame_t * from_frame)
1415 : {
1416 9248 : return esp_decrypt_inline (vm, node, from_frame, 0, 0,
1417 4624 : esp_decrypt_async_next.esp4_post_next);
1418 : }
1419 :
1420 3176 : VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
1421 : vlib_node_runtime_t * node,
1422 : vlib_frame_t * from_frame)
1423 : {
1424 940 : return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
1425 : }
1426 :
1427 2428 : VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
1428 : vlib_node_runtime_t * node,
1429 : vlib_frame_t * from_frame)
1430 : {
1431 384 : return esp_decrypt_inline (vm, node, from_frame, 0, 1,
1432 192 : esp_decrypt_async_next.esp4_tun_post_next);
1433 : }
1434 :
1435 2236 : VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
1436 : vlib_node_runtime_t * node,
1437 : vlib_frame_t * from_frame)
1438 : {
1439 0 : return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
1440 : }
1441 :
1442 3076 : VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
1443 : vlib_node_runtime_t * node,
1444 : vlib_frame_t * from_frame)
1445 : {
1446 1680 : return esp_decrypt_inline (vm, node, from_frame, 1, 0,
1447 840 : esp_decrypt_async_next.esp6_post_next);
1448 : }
1449 :
1450 2636 : VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
1451 : vlib_node_runtime_t * node,
1452 : vlib_frame_t * from_frame)
1453 : {
1454 400 : return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
1455 : }
1456 :
1457 2318 : VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
1458 : vlib_node_runtime_t * node,
1459 : vlib_frame_t * from_frame)
1460 : {
1461 164 : return esp_decrypt_inline (vm, node, from_frame, 1, 1,
1462 82 : esp_decrypt_async_next.esp6_tun_post_next);
1463 : }
1464 :
1465 2236 : VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
1466 : vlib_node_runtime_t * node,
1467 : vlib_frame_t * from_frame)
1468 : {
1469 0 : return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
1470 : }
1471 :
1472 : /* *INDENT-OFF* */
1473 178120 : VLIB_REGISTER_NODE (esp4_decrypt_node) = {
1474 : .name = "esp4-decrypt",
1475 : .vector_size = sizeof (u32),
1476 : .format_trace = format_esp_decrypt_trace,
1477 : .type = VLIB_NODE_TYPE_INTERNAL,
1478 :
1479 : .n_errors = ESP_DECRYPT_N_ERROR,
1480 : .error_counters = esp_decrypt_error_counters,
1481 :
1482 : .n_next_nodes = ESP_DECRYPT_N_NEXT,
1483 : .next_nodes = {
1484 : [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
1485 : [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1486 : [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1487 : [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
1488 : [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1489 : [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
1490 : },
1491 : };
1492 :
1493 178120 : VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
1494 : .name = "esp4-decrypt-post",
1495 : .vector_size = sizeof (u32),
1496 : .format_trace = format_esp_decrypt_trace,
1497 : .type = VLIB_NODE_TYPE_INTERNAL,
1498 :
1499 : .n_errors = ESP_DECRYPT_N_ERROR,
1500 : .error_counters = esp_decrypt_error_counters,
1501 :
1502 : .sibling_of = "esp4-decrypt",
1503 : };
1504 :
1505 178120 : VLIB_REGISTER_NODE (esp6_decrypt_node) = {
1506 : .name = "esp6-decrypt",
1507 : .vector_size = sizeof (u32),
1508 : .format_trace = format_esp_decrypt_trace,
1509 : .type = VLIB_NODE_TYPE_INTERNAL,
1510 :
1511 : .n_errors = ESP_DECRYPT_N_ERROR,
1512 : .error_counters = esp_decrypt_error_counters,
1513 :
1514 : .n_next_nodes = ESP_DECRYPT_N_NEXT,
1515 : .next_nodes = {
1516 : [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
1517 : [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1518 : [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1519 : [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
1520 : [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1521 : [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-handoff",
1522 : },
1523 : };
1524 :
1525 178120 : VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
1526 : .name = "esp6-decrypt-post",
1527 : .vector_size = sizeof (u32),
1528 : .format_trace = format_esp_decrypt_trace,
1529 : .type = VLIB_NODE_TYPE_INTERNAL,
1530 :
1531 : .n_errors = ESP_DECRYPT_N_ERROR,
1532 : .error_counters = esp_decrypt_error_counters,
1533 :
1534 : .sibling_of = "esp6-decrypt",
1535 : };
1536 :
1537 178120 : VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
1538 : .name = "esp4-decrypt-tun",
1539 : .vector_size = sizeof (u32),
1540 : .format_trace = format_esp_decrypt_trace,
1541 : .type = VLIB_NODE_TYPE_INTERNAL,
1542 : .n_errors = ESP_DECRYPT_N_ERROR,
1543 : .error_counters = esp_decrypt_error_counters,
1544 : .n_next_nodes = ESP_DECRYPT_N_NEXT,
1545 : .next_nodes = {
1546 : [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
1547 : [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1548 : [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1549 : [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
1550 : [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1551 : [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
1552 : },
1553 : };
1554 :
1555 178120 : VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
1556 : .name = "esp4-decrypt-tun-post",
1557 : .vector_size = sizeof (u32),
1558 : .format_trace = format_esp_decrypt_trace,
1559 : .type = VLIB_NODE_TYPE_INTERNAL,
1560 :
1561 : .n_errors = ESP_DECRYPT_N_ERROR,
1562 : .error_counters = esp_decrypt_error_counters,
1563 :
1564 : .sibling_of = "esp4-decrypt-tun",
1565 : };
1566 :
1567 178120 : VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
1568 : .name = "esp6-decrypt-tun",
1569 : .vector_size = sizeof (u32),
1570 : .format_trace = format_esp_decrypt_trace,
1571 : .type = VLIB_NODE_TYPE_INTERNAL,
1572 : .n_errors = ESP_DECRYPT_N_ERROR,
1573 : .error_counters = esp_decrypt_error_counters,
1574 : .n_next_nodes = ESP_DECRYPT_N_NEXT,
1575 : .next_nodes = {
1576 : [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
1577 : [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1578 : [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1579 : [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
1580 : [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1581 : [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-tun-handoff",
1582 : },
1583 : };
1584 :
1585 178120 : VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
1586 : .name = "esp6-decrypt-tun-post",
1587 : .vector_size = sizeof (u32),
1588 : .format_trace = format_esp_decrypt_trace,
1589 : .type = VLIB_NODE_TYPE_INTERNAL,
1590 :
1591 : .n_errors = ESP_DECRYPT_N_ERROR,
1592 : .error_counters = esp_decrypt_error_counters,
1593 :
1594 : .sibling_of = "esp6-decrypt-tun",
1595 : };
1596 : /* *INDENT-ON* */
1597 :
1598 : #ifndef CLIB_MARCH_VARIANT
1599 :
1600 : static clib_error_t *
1601 559 : esp_decrypt_init (vlib_main_t *vm)
1602 : {
1603 559 : ipsec_main_t *im = &ipsec_main;
1604 :
1605 559 : im->esp4_dec_fq_index =
1606 559 : vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
1607 559 : im->esp6_dec_fq_index =
1608 559 : vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
1609 559 : im->esp4_dec_tun_fq_index =
1610 559 : vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
1611 559 : im->esp6_dec_tun_fq_index =
1612 559 : vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);
1613 :
1614 559 : return 0;
1615 : }
1616 :
1617 54319 : VLIB_INIT_FUNCTION (esp_decrypt_init);
1618 :
1619 : #endif
1620 :
1621 : /*
1622 : * fd.io coding-style-patch-verification: ON
1623 : *
1624 : * Local Variables:
1625 : * eval: (c-set-style "gnu")
1626 : * End:
1627 : */
|