Line data Source code
1 : /*
2 : * esp_encrypt.c : IPSec ESP encrypt node
3 : *
4 : * Copyright (c) 2015 Cisco and/or its affiliates.
5 : * Licensed under the Apache License, Version 2.0 (the "License");
6 : * you may not use this file except in compliance with the License.
7 : * You may obtain a copy of the License at:
8 : *
9 : * http://www.apache.org/licenses/LICENSE-2.0
10 : *
11 : * Unless required by applicable law or agreed to in writing, software
12 : * distributed under the License is distributed on an "AS IS" BASIS,
13 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 : * See the License for the specific language governing permissions and
15 : * limitations under the License.
16 : */
17 :
18 : #include <vnet/vnet.h>
19 : #include <vnet/api_errno.h>
20 : #include <vnet/ip/ip.h>
21 :
22 : #include <vnet/crypto/crypto.h>
23 :
24 : #include <vnet/ipsec/ipsec.h>
25 : #include <vnet/ipsec/ipsec_tun.h>
26 : #include <vnet/ipsec/ipsec.api_enum.h>
27 : #include <vnet/ipsec/esp.h>
28 : #include <vnet/tunnel/tunnel_dp.h>
29 :
30 : #define foreach_esp_encrypt_next \
31 : _ (DROP4, "ip4-drop") \
32 : _ (DROP6, "ip6-drop") \
33 : _ (DROP_MPLS, "mpls-drop") \
34 : _ (HANDOFF4, "handoff4") \
35 : _ (HANDOFF6, "handoff6") \
36 : _ (HANDOFF_MPLS, "handoff-mpls") \
37 : _ (INTERFACE_OUTPUT, "interface-output")
38 :
39 : #define _(v, s) ESP_ENCRYPT_NEXT_##v,
40 : typedef enum
41 : {
42 : foreach_esp_encrypt_next
43 : #undef _
44 : ESP_ENCRYPT_N_NEXT,
45 : } esp_encrypt_next_t;
46 :
47 : typedef struct
48 : {
49 : u32 sa_index;
50 : u32 spi;
51 : u32 seq;
52 : u32 sa_seq_hi;
53 : u8 udp_encap;
54 : ipsec_crypto_alg_t crypto_alg;
55 : ipsec_integ_alg_t integ_alg;
56 : } esp_encrypt_trace_t;
57 :
58 : typedef struct
59 : {
60 : u32 next_index;
61 : } esp_encrypt_post_trace_t;
62 :
63 : typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
64 :
65 : /* packet trace format function */
66 : static u8 *
67 171766 : format_esp_encrypt_trace (u8 * s, va_list * args)
68 : {
69 171766 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
70 171766 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
71 171766 : esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
72 :
73 : s =
74 171766 : format (s,
75 : "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
76 : t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
77 : format_ipsec_crypto_alg,
78 171766 : t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
79 171766 : t->udp_encap ? " udp-encap-enabled" : "");
80 171766 : return s;
81 : }
82 :
83 : static u8 *
84 1030 : format_esp_post_encrypt_trace (u8 * s, va_list * args)
85 : {
86 1030 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
87 1030 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
88 1030 : esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);
89 :
90 1030 : s = format (s, "esp-post: next node index %u", t->next_index);
91 1030 : return s;
92 : }
93 :
94 : /* pad packet in input buffer */
95 : static_always_inline u8 *
96 222622 : esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
97 : u8 icv_sz, vlib_node_runtime_t *node,
98 : u16 buffer_data_size, uword total_len)
99 : {
100 : static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
101 : 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
102 : 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
103 : };
104 :
105 222622 : u16 min_length = total_len + sizeof (esp_footer_t);
106 222622 : u16 new_length = round_pow2 (min_length, esp_align);
107 222622 : u8 pad_bytes = new_length - min_length;
108 222622 : esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
109 222622 : last[0]->current_length + pad_bytes);
110 222622 : u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
111 :
112 222622 : if (last[0]->current_data + last[0]->current_length + tail_sz >
113 : buffer_data_size)
114 : {
115 24656 : u32 tmp_bi = 0;
116 24656 : if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
117 0 : return 0;
118 :
119 24656 : vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
120 24656 : last[0]->next_buffer = tmp_bi;
121 24656 : last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
122 24656 : f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
123 24656 : tmp->current_length += tail_sz;
124 24656 : last[0] = tmp;
125 : }
126 : else
127 197966 : last[0]->current_length += tail_sz;
128 :
129 222622 : f->pad_length = pad_bytes;
130 222622 : if (pad_bytes)
131 : {
132 153187 : ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
133 153187 : pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
134 153187 : clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
135 : }
136 :
137 222622 : return &f->next_header;
138 : }
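/* Illustrative layout of the trailer appended above (RFC 4303); the caller
 * fills *next_header once the inner protocol is known:
 *
 *   ... payload | pad 0x01 0x02 ... | pad_length | next_header | ICV
 *               \___ pad_bytes ____/ \_____ esp_footer_t _____/
 *
 * If the trailer does not fit in the last buffer, it is written into a
 * freshly allocated buffer chained onto the packet. */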
139 :
140 : static_always_inline void
141 160713 : esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
142 : {
143 : ip_csum_t sum;
144 : u16 old_len;
145 :
146 160713 : len = clib_net_to_host_u16 (len);
147 160713 : old_len = ip4->length;
148 :
149 160713 : if (is_transport)
150 : {
151 122965 : u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
152 :
153 122965 : sum = ip_csum_update (ip4->checksum, ip4->protocol,
154 : prot, ip4_header_t, protocol);
155 122965 : ip4->protocol = prot;
156 :
157 122965 : sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
158 : }
159 : else
160 37748 : sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);
161 :
162 160713 : ip4->length = len;
163 160713 : ip4->checksum = ip_csum_fold (sum);
164 160713 : }
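/* esp_update_ip4_hdr() patches the outer/transport IPv4 header in place,
 * using incremental checksum updates (ip_csum_update) for the new length
 * and, in transport mode, the new protocol, instead of recomputing the
 * whole header checksum. */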
165 :
166 : static_always_inline void
167 1812 : esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
168 : {
169 1812 : clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
170 1812 : udp->length = clib_net_to_host_u16 (len);
171 1812 : }
172 :
173 : static_always_inline u8
174 32535 : ext_hdr_is_pre_esp (u8 nexthdr)
175 : {
176 : #ifdef CLIB_HAVE_VEC128
177 : static const u8x16 ext_hdr_types = {
178 : IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
179 : IP_PROTOCOL_IPV6_ROUTE,
180 : IP_PROTOCOL_IPV6_FRAGMENTATION,
181 : };
182 :
183 32535 : return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
184 : #else
185 : return (nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS ||
186 : nexthdr == IP_PROTOCOL_IPV6_ROUTE ||
187 : nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION);
188 : #endif
189 : }
190 :
191 : static_always_inline u8
192 32530 : esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
193 : {
194 : /* this code assumes that the HbH, routing and fragment headers appear
195 : before any others; if that is not the case, they will end up encrypted */
196 32530 : u8 len = sizeof (ip6_header_t);
197 : ip6_ext_header_t *p;
198 :
199 : /* if the packet doesn't have an ext header */
200 32530 : if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
201 : {
202 32527 : *ext_hdr = NULL;
203 32527 : return len;
204 : }
205 :
206 3 : p = ip6_next_header (ip6);
207 3 : len += ip6_ext_header_len (p);
208 5 : while (ext_hdr_is_pre_esp (p->next_hdr))
209 : {
210 2 : len += ip6_ext_header_len (p);
211 2 : p = ip6_ext_next_header (p);
212 : }
213 :
214 3 : *ext_hdr = p;
215 3 : return len;
216 : }
217 :
218 : /* IPsec IV generation: IV requirements differ depending on the
219 : * encryption mode: IVs must be unpredictable for AES-CBC, whereas for
220 : * CTR and GCM they may be predictable but must never be reused with the
221 : * same key material.
222 : * To avoid reusing the same IVs between multiple VPP instances and between
223 : * restarts, we use a properly seeded PRNG to generate IVs. To ensure the
224 : * IV is unpredictable for CBC, it is then encrypted using the same key as
225 : * the message. Refer to NIST SP800-38A and NIST SP800-38D for more
226 : * details. */
227 : static_always_inline void *
228 207928 : esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
229 : {
230 207928 : ASSERT (iv_sz >= sizeof (u64));
231 207928 : u64 *iv = (u64 *) (payload - iv_sz);
232 207928 : clib_memset_u8 (iv, 0, iv_sz);
233 207928 : *iv = clib_pcg64i_random_r (&sa->iv_prng);
234 207928 : return iv;
235 : }
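/* How the generated IV is consumed by the prepare functions below: for
 * CTR/GCM the 64-bit PRNG output is sent in clear as the ESP IV field and
 * is also copied, together with the SA salt, into the per-op nonce; for
 * CBC the crypto op is given a zero IV and the random block written in
 * front of the payload is included in the encrypted range, so the on-wire
 * IV is the encryption of the PRNG output and hence unpredictable. */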
236 :
237 : static_always_inline void
238 8496 : esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
239 : vnet_crypto_op_t * ops, vlib_buffer_t * b[],
240 : u16 * nexts, vnet_crypto_op_chunk_t * chunks,
241 : u16 drop_next)
242 : {
243 8496 : u32 n_fail, n_ops = vec_len (ops);
244 8496 : vnet_crypto_op_t *op = ops;
245 :
246 8496 : if (n_ops == 0)
247 7492 : return;
248 :
249 1004 : n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
250 :
251 1004 : while (n_fail)
252 : {
253 0 : ASSERT (op - ops < n_ops);
254 :
255 0 : if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
256 : {
257 0 : u32 bi = op->user_data;
258 0 : esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
259 : ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
260 : bi, nexts, drop_next,
261 0 : vnet_buffer (b[bi])->ipsec.sad_index);
262 0 : n_fail--;
263 : }
264 0 : op++;
265 : }
266 : }
267 :
268 : static_always_inline void
269 8496 : esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
270 : vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
271 : u16 drop_next)
272 : {
273 8496 : u32 n_fail, n_ops = vec_len (ops);
274 8496 : vnet_crypto_op_t *op = ops;
275 :
276 8496 : if (n_ops == 0)
277 2430 : return;
278 :
279 6066 : n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
280 :
281 6066 : while (n_fail)
282 : {
283 0 : ASSERT (op - ops < n_ops);
284 :
285 0 : if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
286 : {
287 0 : u32 bi = op->user_data;
288 0 : esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
289 : ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
290 : bi, nexts, drop_next,
291 0 : vnet_buffer (b[bi])->ipsec.sad_index);
292 0 : n_fail--;
293 : }
294 0 : op++;
295 : }
296 : }
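/* esp_process_ops() and esp_process_chained_ops() run the queued sync ops
 * through the crypto engine and reroute any buffer whose op did not
 * complete to the drop next with a crypto-engine error. */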
297 :
298 : static_always_inline u32
299 56816 : esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
300 : ipsec_sa_t * sa0, vlib_buffer_t * b,
301 : vlib_buffer_t * lb, u8 icv_sz, u8 * start,
302 : u32 start_len, u16 * n_ch)
303 : {
304 : vnet_crypto_op_chunk_t *ch;
305 56816 : vlib_buffer_t *cb = b;
306 56816 : u32 n_chunks = 1;
307 : u32 total_len;
308 56816 : vec_add2 (ptd->chunks, ch, 1);
309 56816 : total_len = ch->len = start_len;
310 56816 : ch->src = ch->dst = start;
311 56816 : cb = vlib_get_buffer (vm, cb->next_buffer);
312 :
313 : while (1)
314 : {
315 73164 : vec_add2 (ptd->chunks, ch, 1);
316 73164 : n_chunks += 1;
317 73164 : if (lb == cb)
318 56816 : total_len += ch->len = cb->current_length - icv_sz;
319 : else
320 16348 : total_len += ch->len = cb->current_length;
321 73164 : ch->src = ch->dst = vlib_buffer_get_current (cb);
322 :
323 73164 : if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
324 56816 : break;
325 :
326 16348 : cb = vlib_get_buffer (vm, cb->next_buffer);
327 : }
328 :
329 56816 : if (n_ch)
330 37520 : *n_ch = n_chunks;
331 :
332 56816 : return total_len;
333 : }
334 :
335 : static_always_inline u32
336 49044 : esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
337 : ipsec_sa_t * sa0, vlib_buffer_t * b,
338 : vlib_buffer_t * lb, u8 icv_sz, u8 * start,
339 : u32 start_len, u8 * digest, u16 * n_ch)
340 : {
341 : vnet_crypto_op_chunk_t *ch;
342 49044 : vlib_buffer_t *cb = b;
343 49044 : u32 n_chunks = 1;
344 : u32 total_len;
345 49044 : vec_add2 (ptd->chunks, ch, 1);
346 49044 : total_len = ch->len = start_len;
347 49044 : ch->src = start;
348 49044 : cb = vlib_get_buffer (vm, cb->next_buffer);
349 :
350 : while (1)
351 : {
352 62712 : vec_add2 (ptd->chunks, ch, 1);
353 62712 : n_chunks += 1;
354 62712 : if (lb == cb)
355 : {
356 49044 : total_len += ch->len = cb->current_length - icv_sz;
357 49044 : if (ipsec_sa_is_set_USE_ESN (sa0))
358 : {
359 24522 : u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
360 24522 : clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
361 24522 : ch->len += sizeof (seq_hi);
362 24522 : total_len += sizeof (seq_hi);
363 : }
364 : }
365 : else
366 13668 : total_len += ch->len = cb->current_length;
367 62712 : ch->src = vlib_buffer_get_current (cb);
368 :
369 62712 : if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
370 49044 : break;
371 :
372 13668 : cb = vlib_get_buffer (vm, cb->next_buffer);
373 : }
374 :
375 49044 : if (n_ch)
376 29748 : *n_ch = n_chunks;
377 :
378 49044 : return total_len;
379 : }
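/* Both chain helpers above walk a chained buffer and emit one
 * vnet_crypto_op_chunk_t per buffer into ptd->chunks, excluding the ICV
 * bytes at the tail of the last buffer; the integrity variant additionally
 * appends the high 32 bits of the sequence number after the last chunk when
 * ESN is in use, as required by RFC 4303. */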
380 :
381 : always_inline void
382 180723 : esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
383 : vnet_crypto_op_t **crypto_ops,
384 : vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
385 : u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
386 : vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
387 : esp_header_t *esp)
388 : {
389 180723 : if (sa0->crypto_enc_op_id)
390 : {
391 : vnet_crypto_op_t *op;
392 166029 : vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
393 166029 : vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
394 166029 : u8 *crypto_start = payload;
395 : /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
396 : * have enough space for the ESP header and footer, which includes the ICV */
397 166029 : ASSERT (payload_len > icv_sz);
398 166029 : u16 crypto_len = payload_len - icv_sz;
399 :
400 : /* generate the IV in front of the payload */
401 166029 : void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
402 :
403 166029 : op->key_index = sa0->crypto_key_index;
404 166029 : op->user_data = bi;
405 :
406 166029 : if (ipsec_sa_is_set_IS_CTR (sa0))
407 : {
408 : /* construct nonce in a scratch space in front of the IP header */
409 86259 : esp_ctr_nonce_t *nonce =
410 86259 : (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
411 86259 : if (ipsec_sa_is_set_IS_AEAD (sa0))
412 : {
414 : /* construct aad in a scratch space in front of the nonce */
414 43701 : op->aad = (u8 *) nonce - sizeof (esp_aead_t);
415 43701 : op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
416 43701 : op->tag = payload + crypto_len;
417 43701 : op->tag_len = 16;
418 : }
419 : else
420 : {
421 42558 : nonce->ctr = clib_host_to_net_u32 (1);
422 : }
423 :
424 86259 : nonce->salt = sa0->salt;
425 86259 : nonce->iv = *(u64 *) pkt_iv;
426 86259 : op->iv = (u8 *) nonce;
427 : }
428 : else
429 : {
430 : /* construct zero iv in front of the IP header */
431 79770 : op->iv = pkt_iv - hdr_len - iv_sz;
432 79770 : clib_memset_u8 (op->iv, 0, iv_sz);
433 : /* include iv field in crypto */
434 79770 : crypto_start -= iv_sz;
435 79770 : crypto_len += iv_sz;
436 : }
437 :
438 166029 : if (PREDICT_FALSE (lb != b[0]))
439 : {
440 : /* is chained */
441 37520 : op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
442 37520 : op->chunk_index = vec_len (ptd->chunks);
443 37520 : op->tag = vlib_buffer_get_tail (lb) - icv_sz;
444 37520 : esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
445 37520 : crypto_start, crypto_len + icv_sz,
446 37520 : &op->n_chunks);
447 : }
448 : else
449 : {
450 : /* not chained */
451 128509 : op->src = op->dst = crypto_start;
452 128509 : op->len = crypto_len;
453 : }
454 : }
455 :
456 180723 : if (sa0->integ_op_id)
457 : {
458 : vnet_crypto_op_t *op;
459 136895 : vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
460 136895 : vnet_crypto_op_init (op, sa0->integ_op_id);
461 136895 : op->src = payload - iv_sz - sizeof (esp_header_t);
462 136895 : op->digest = payload + payload_len - icv_sz;
463 136895 : op->key_index = sa0->integ_key_index;
464 136895 : op->digest_len = icv_sz;
465 136895 : op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
466 136895 : op->user_data = bi;
467 :
468 136895 : if (lb != b[0])
469 : {
470 : /* is chained */
471 29748 : op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
472 29748 : op->chunk_index = vec_len (ptd->chunks);
473 29748 : op->digest = vlib_buffer_get_tail (lb) - icv_sz;
474 :
475 29748 : esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
476 29748 : payload - iv_sz - sizeof (esp_header_t),
477 29748 : payload_len + iv_sz +
478 29748 : sizeof (esp_header_t), op->digest,
479 29748 : &op->n_chunks);
480 : }
481 107147 : else if (ipsec_sa_is_set_USE_ESN (sa0))
482 : {
483 42278 : u32 tmp = clib_net_to_host_u32 (seq_hi);
484 42278 : clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
485 42278 : op->len += sizeof (seq_hi);
486 : }
487 : }
488 180723 : }
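/* Scratch layout used above (and mirrored by esp_prepare_async_frame()
 * below): AEAD modes build the aad immediately before an esp_ctr_nonce_t,
 * plain CTR builds only the nonce, and the remaining modes a zeroed IV for
 * the crypto op, all placed in the buffer headroom in front of the newly
 * written headers; these scratch bytes are never transmitted. */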
489 :
490 : static_always_inline void
491 41899 : esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
492 : vnet_crypto_async_frame_t *async_frame,
493 : ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
494 : u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
495 : u32 bi, u16 next, u32 hdr_len, u16 async_next,
496 : vlib_buffer_t *lb)
497 : {
498 41899 : esp_post_data_t *post = esp_post_data (b);
499 41899 : u8 *tag, *iv, *aad = 0;
500 41899 : u8 flag = 0;
501 41899 : const u32 key_index = sa->crypto_key_index;
502 : i16 crypto_start_offset, integ_start_offset;
503 : u16 crypto_total_len, integ_total_len;
504 :
505 41899 : post->next_index = next;
506 :
507 : /* crypto */
508 41899 : crypto_start_offset = integ_start_offset = payload - b->data;
509 41899 : crypto_total_len = integ_total_len = payload_len - icv_sz;
510 41899 : tag = payload + crypto_total_len;
511 :
512 : /* generate the IV in front of the payload */
513 41899 : void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
514 :
515 41899 : if (ipsec_sa_is_set_IS_CTR (sa))
516 : {
517 : /* construct nonce in a scratch space in front of the IP header */
518 24321 : esp_ctr_nonce_t *nonce =
519 24321 : (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
520 24321 : if (ipsec_sa_is_set_IS_AEAD (sa))
521 : {
523 : /* construct aad in a scratch space in front of the nonce */
523 24321 : aad = (u8 *) nonce - sizeof (esp_aead_t);
524 24321 : esp_aad_fill (aad, esp, sa, sa->seq_hi);
525 : }
526 : else
527 : {
528 0 : nonce->ctr = clib_host_to_net_u32 (1);
529 : }
530 :
531 24321 : nonce->salt = sa->salt;
532 24321 : nonce->iv = *(u64 *) pkt_iv;
533 24321 : iv = (u8 *) nonce;
534 : }
535 : else
536 : {
537 : /* construct zero iv in front of the IP header */
538 17578 : iv = pkt_iv - hdr_len - iv_sz;
539 17578 : clib_memset_u8 (iv, 0, iv_sz);
540 : /* include iv field in crypto */
541 17578 : crypto_start_offset -= iv_sz;
542 17578 : crypto_total_len += iv_sz;
543 : }
544 :
545 41899 : if (lb != b)
546 : {
547 : /* chain */
548 19296 : flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
549 19296 : tag = vlib_buffer_get_tail (lb) - icv_sz;
550 19296 : crypto_total_len = esp_encrypt_chain_crypto (
551 19296 : vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
552 19296 : crypto_total_len + icv_sz, 0);
553 : }
554 :
555 41899 : if (sa->integ_op_id)
556 : {
557 41899 : integ_start_offset -= iv_sz + sizeof (esp_header_t);
558 41899 : integ_total_len += iv_sz + sizeof (esp_header_t);
559 :
560 41899 : if (b != lb)
561 : {
562 19296 : integ_total_len = esp_encrypt_chain_integ (
563 : vm, ptd, sa, b, lb, icv_sz,
564 19296 : payload - iv_sz - sizeof (esp_header_t),
565 19296 : payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
566 : }
567 22603 : else if (ipsec_sa_is_set_USE_ESN (sa))
568 : {
569 10875 : u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
570 10875 : clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
571 10875 : integ_total_len += sizeof (seq_hi);
572 : }
573 : }
574 :
575 : /* this always succeeds because we know the frame is not full */
576 41899 : vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
577 41899 : integ_total_len - crypto_total_len,
578 : crypto_start_offset, integ_start_offset, bi,
579 : async_next, iv, tag, aad, flag);
580 41899 : }
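/* The async variant records byte offsets relative to b->data rather than
 * raw pointers, saves the eventual next node in the buffer's esp_post_data,
 * and adds the element to an open crypto frame; frames are submitted in
 * bulk at the end of esp_encrypt_inline() and completed buffers re-enter
 * the graph through the *-encrypt-post nodes. */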
581 :
582 : always_inline uword
583 5441 : esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
584 : vlib_frame_t *frame, vnet_link_t lt, int is_tun,
585 : u16 async_next_node)
586 : {
587 5441 : ipsec_main_t *im = &ipsec_main;
588 5441 : ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
589 5441 : u32 *from = vlib_frame_vector_args (frame);
590 5441 : u32 n_left = frame->n_vectors;
591 5441 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
592 5441 : u32 thread_index = vm->thread_index;
593 5441 : u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
594 5441 : u32 current_sa_index = ~0, current_sa_packets = 0;
595 5441 : u32 current_sa_bytes = 0, spi = 0;
596 5441 : u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
597 5441 : ipsec_sa_t *sa0 = 0;
598 : vlib_buffer_t *lb;
599 5441 : vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
600 5441 : vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
601 : vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
602 5441 : int is_async = im->async_mode;
603 5441 : vnet_crypto_async_op_id_t async_op = ~0;
604 5441 : u16 drop_next =
605 : (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
606 : (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
607 : ESP_ENCRYPT_NEXT_DROP_MPLS));
608 5441 : u16 handoff_next = (lt == VNET_LINK_IP6 ?
609 : ESP_ENCRYPT_NEXT_HANDOFF6 :
610 : (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
611 : ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
612 : vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
613 5441 : u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
614 5441 : u16 n_async = 0;
615 5441 : u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
616 : u32 sync_bi[VLIB_FRAME_SIZE];
617 : u32 noop_bi[VLIB_FRAME_SIZE];
618 : esp_encrypt_error_t err;
619 :
620 5441 : vlib_get_buffers (vm, from, b, n_left);
621 :
622 5441 : vec_reset_length (ptd->crypto_ops);
623 5441 : vec_reset_length (ptd->integ_ops);
624 5441 : vec_reset_length (ptd->chained_crypto_ops);
625 5441 : vec_reset_length (ptd->chained_integ_ops);
626 5441 : vec_reset_length (ptd->async_frames);
627 5441 : vec_reset_length (ptd->chunks);
628 5441 : clib_memset (async_frames, 0, sizeof (async_frames));
629 :
630 230110 : while (n_left > 0)
631 : {
632 : u32 sa_index0;
633 : dpo_id_t *dpo;
634 : esp_header_t *esp;
635 : u8 *payload, *next_hdr_ptr;
636 : u16 payload_len, payload_len_total, n_bufs;
637 : u32 hdr_len;
638 :
639 224669 : err = ESP_ENCRYPT_ERROR_RX_PKTS;
640 :
641 224669 : if (n_left > 2)
642 : {
643 : u8 *p;
644 215132 : vlib_prefetch_buffer_header (b[2], LOAD);
645 215132 : p = vlib_buffer_get_current (b[1]);
646 215132 : clib_prefetch_load (p);
647 215132 : p -= CLIB_CACHE_LINE_BYTES;
648 215132 : clib_prefetch_load (p);
649 : /* speculate that the trailer goes in the first buffer */
650 215132 : CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
651 : CLIB_CACHE_LINE_BYTES, LOAD);
652 : }
653 :
654 224669 : if (is_tun)
655 : {
657 : /* we are on an ipsec tunnel's feature arc */
657 19715 : vnet_buffer (b[0])->ipsec.sad_index =
658 19715 : sa_index0 = ipsec_tun_protect_get_sa_out
659 19715 : (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
660 :
661 19715 : if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
662 : {
663 63 : err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
664 63 : noop_nexts[n_noop] = drop_next;
665 63 : b[0]->error = node->errors[err];
666 63 : goto trace;
667 : }
668 : }
669 : else
670 204954 : sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
671 :
672 224606 : if (sa_index0 != current_sa_index)
673 : {
674 10544 : if (current_sa_packets)
675 4085 : vlib_increment_combined_counter (
676 : &ipsec_sa_counters, thread_index, current_sa_index,
677 : current_sa_packets, current_sa_bytes);
678 10544 : current_sa_packets = current_sa_bytes = 0;
679 :
680 10544 : sa0 = ipsec_sa_get (sa_index0);
681 :
682 10544 : if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
683 : sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
684 : !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
685 : {
686 1 : err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
687 1 : esp_encrypt_set_next_index (b[0], node, thread_index, err,
688 : n_noop, noop_nexts, drop_next,
689 : sa_index0);
690 1 : goto trace;
691 : }
692 10543 : current_sa_index = sa_index0;
693 10543 : vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
694 : current_sa_index);
695 :
696 : /* fetch the second cacheline ASAP */
697 10543 : clib_prefetch_load (sa0->cacheline1);
698 :
699 10543 : spi = clib_net_to_host_u32 (sa0->spi);
700 10543 : esp_align = sa0->esp_block_align;
701 10543 : icv_sz = sa0->integ_icv_size;
702 10543 : iv_sz = sa0->crypto_iv_size;
703 10543 : is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
704 : }
705 :
706 224605 : if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
707 : {
708 : /* this is the first packet to use this SA, so claim the SA
709 : * for this thread; the claim could race with the same update
710 : * on another thread, hence the atomic compare-and-swap */
711 9 : clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
712 : ipsec_sa_assign_thread (thread_index));
713 : }
714 :
715 224605 : if (PREDICT_FALSE (thread_index != sa0->thread_index))
716 : {
717 1143 : vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
718 1143 : err = ESP_ENCRYPT_ERROR_HANDOFF;
719 1143 : esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
720 : noop_nexts, handoff_next,
721 : current_sa_index);
722 1143 : goto trace;
723 : }
724 :
725 223462 : lb = b[0];
726 223462 : n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
727 223462 : if (n_bufs == 0)
728 : {
729 0 : err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
730 0 : esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
731 : noop_nexts, drop_next, current_sa_index);
732 0 : goto trace;
733 : }
734 :
735 223462 : if (n_bufs > 1)
736 : {
737 : /* find last buffer in the chain */
738 101036 : while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
739 52528 : lb = vlib_get_buffer (vm, lb->next_buffer);
740 : }
741 :
742 223462 : if (PREDICT_FALSE (esp_seq_advance (sa0)))
743 : {
744 840 : err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
745 840 : esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
746 : noop_nexts, drop_next, current_sa_index);
747 840 : goto trace;
748 : }
749 :
750 : /* space for IV */
751 222622 : hdr_len = iv_sz;
752 :
753 222622 : if (ipsec_sa_is_set_IS_TUNNEL (sa0))
754 : {
755 67127 : payload = vlib_buffer_get_current (b[0]);
756 67127 : next_hdr_ptr = esp_add_footer_and_icv (
757 : vm, &lb, esp_align, icv_sz, node, buffer_data_size,
758 : vlib_buffer_length_in_chain (vm, b[0]));
759 67127 : if (!next_hdr_ptr)
760 : {
761 0 : err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
762 0 : esp_encrypt_set_next_index (b[0], node, thread_index, err,
763 : n_noop, noop_nexts, drop_next,
764 : current_sa_index);
765 0 : goto trace;
766 : }
767 67127 : b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
768 67127 : payload_len = b[0]->current_length;
769 67127 : payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
770 :
771 : /* ESP header */
772 67127 : hdr_len += sizeof (*esp);
773 67127 : esp = (esp_header_t *) (payload - hdr_len);
774 :
775 : /* optional UDP header */
776 67127 : if (ipsec_sa_is_set_UDP_ENCAP (sa0))
777 : {
778 3 : hdr_len += sizeof (udp_header_t);
779 3 : esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
780 3 : payload_len_total + hdr_len);
781 : }
782 :
783 : /* IP header */
784 67127 : if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
785 : {
786 : ip6_header_t *ip6;
787 29379 : u16 len = sizeof (ip6_header_t);
788 29379 : hdr_len += len;
789 29379 : ip6 = (ip6_header_t *) (payload - hdr_len);
790 29379 : clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
791 :
792 29379 : if (VNET_LINK_IP6 == lt)
793 : {
794 29062 : *next_hdr_ptr = IP_PROTOCOL_IPV6;
795 29062 : tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
796 : (const ip6_header_t *) payload,
797 : ip6);
798 : }
799 317 : else if (VNET_LINK_IP4 == lt)
800 : {
801 190 : *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
802 190 : tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
803 : (const ip4_header_t *) payload, ip6);
804 : }
805 127 : else if (VNET_LINK_MPLS == lt)
806 : {
807 127 : *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
808 127 : tunnel_encap_fixup_mplso6 (
809 127 : sa0->tunnel_flags, b[0],
810 : (const mpls_unicast_header_t *) payload, ip6);
811 : }
812 : else
813 0 : ASSERT (0);
814 :
815 29379 : len = payload_len_total + hdr_len - len;
816 29379 : ip6->payload_length = clib_net_to_host_u16 (len);
817 29379 : b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
818 : }
819 : else
820 : {
821 : ip4_header_t *ip4;
822 37748 : u16 len = sizeof (ip4_header_t);
823 37748 : hdr_len += len;
824 37748 : ip4 = (ip4_header_t *) (payload - hdr_len);
825 37748 : clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
826 :
827 37748 : if (VNET_LINK_IP6 == lt)
828 : {
829 190 : *next_hdr_ptr = IP_PROTOCOL_IPV6;
830 190 : tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
831 : (const ip6_header_t *)
832 : payload, ip4);
833 : }
834 37558 : else if (VNET_LINK_IP4 == lt)
835 : {
836 37431 : *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
837 37431 : tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
838 : (const ip4_header_t *)
839 : payload, ip4);
840 : }
841 127 : else if (VNET_LINK_MPLS == lt)
842 : {
843 127 : *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
844 127 : tunnel_encap_fixup_mplso4_w_chksum (
845 127 : sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
846 : ip4);
847 : }
848 : else
849 0 : ASSERT (0);
850 :
851 37748 : len = payload_len_total + hdr_len;
852 37748 : esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
853 : }
854 :
855 67127 : dpo = &sa0->dpo;
856 67127 : if (!is_tun)
857 : {
858 61205 : sync_next[0] = dpo->dpoi_next_node;
859 61205 : vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
860 : }
861 : else
862 5922 : sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
863 67127 : b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
864 : }
865 : else /* transport mode */
866 : {
867 : u8 *l2_hdr, l2_len, *ip_hdr;
868 : u16 ip_len;
869 : ip6_ext_header_t *ext_hdr;
870 155495 : udp_header_t *udp = 0;
871 155495 : u16 udp_len = 0;
872 155495 : u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
873 :
874 : /*
875 : * Get extension header chain length. It might be longer than the
876 : * buffer's pre_data area.
877 : */
878 310990 : ip_len =
879 : (VNET_LINK_IP6 == lt ?
880 32530 : esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
881 122965 : ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
882 155495 : if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
883 : {
884 0 : err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
885 0 : esp_encrypt_set_next_index (b[0], node, thread_index, err,
886 : n_noop, noop_nexts, drop_next,
887 : current_sa_index);
888 0 : goto trace;
889 : }
890 :
891 155495 : vlib_buffer_advance (b[0], ip_len);
892 155495 : payload = vlib_buffer_get_current (b[0]);
893 155495 : next_hdr_ptr = esp_add_footer_and_icv (
894 : vm, &lb, esp_align, icv_sz, node, buffer_data_size,
895 : vlib_buffer_length_in_chain (vm, b[0]));
896 155495 : if (!next_hdr_ptr)
897 : {
898 0 : err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
899 0 : esp_encrypt_set_next_index (b[0], node, thread_index, err,
900 : n_noop, noop_nexts, drop_next,
901 : current_sa_index);
902 0 : goto trace;
903 : }
904 :
905 155495 : b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
906 155495 : payload_len = b[0]->current_length;
907 155495 : payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
908 :
909 : /* ESP header */
910 155495 : hdr_len += sizeof (*esp);
911 155495 : esp = (esp_header_t *) (payload - hdr_len);
912 :
913 : /* optional UDP header */
914 155495 : if (ipsec_sa_is_set_UDP_ENCAP (sa0))
915 : {
916 1809 : hdr_len += sizeof (udp_header_t);
917 1809 : udp = (udp_header_t *) (payload - hdr_len);
918 : }
919 :
920 : /* IP header */
921 155495 : hdr_len += ip_len;
922 155495 : ip_hdr = payload - hdr_len;
923 :
924 : /* L2 header */
925 155495 : if (!is_tun)
926 : {
927 141826 : l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
928 141826 : hdr_len += l2_len;
929 141826 : l2_hdr = payload - hdr_len;
930 :
931 : /* copy l2 and ip header */
932 141826 : clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
933 : }
934 : else
935 13669 : l2_len = 0;
936 :
937 : u16 len;
938 155495 : len = payload_len_total + hdr_len - l2_len;
939 :
940 155495 : if (VNET_LINK_IP6 == lt)
941 : {
942 32530 : ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
943 32530 : if (PREDICT_TRUE (NULL == ext_hdr))
944 : {
945 32527 : *next_hdr_ptr = ip6->protocol;
946 32527 : ip6->protocol =
947 : (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
948 : }
949 : else
950 : {
951 3 : *next_hdr_ptr = ext_hdr->next_hdr;
952 3 : ext_hdr->next_hdr =
953 : (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
954 : }
955 32530 : ip6->payload_length =
956 32530 : clib_host_to_net_u16 (len - sizeof (ip6_header_t));
957 : }
958 122965 : else if (VNET_LINK_IP4 == lt)
959 : {
960 122965 : ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
961 122965 : *next_hdr_ptr = ip4->protocol;
962 122965 : esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
963 : (udp != NULL));
964 : }
965 :
966 155495 : clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
967 :
968 155495 : if (udp)
969 : {
970 1809 : udp_len = len - ip_len;
971 1809 : esp_fill_udp_hdr (sa0, udp, udp_len);
972 : }
973 :
974 155495 : sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
975 : }
976 :
977 222622 : if (lb != b[0])
978 : {
979 60300 : crypto_ops = &ptd->chained_crypto_ops;
980 60300 : integ_ops = &ptd->chained_integ_ops;
981 : }
982 : else
983 : {
984 162322 : crypto_ops = &ptd->crypto_ops;
985 162322 : integ_ops = &ptd->integ_ops;
986 : }
987 :
988 222622 : esp->spi = spi;
989 222622 : esp->seq = clib_net_to_host_u32 (sa0->seq);
990 :
991 222622 : if (is_async)
992 : {
993 41899 : async_op = sa0->crypto_async_enc_op_id;
994 :
995 : /* get a frame for this op if we don't yet have one or it's full
996 : */
997 82635 : if (NULL == async_frames[async_op] ||
998 40736 : vnet_crypto_async_frame_is_full (async_frames[async_op]))
999 : {
1000 1391 : async_frames[async_op] =
1001 1391 : vnet_crypto_async_get_frame (vm, async_op);
1002 :
1003 1391 : if (PREDICT_FALSE (!async_frames[async_op]))
1004 : {
1005 0 : err = ESP_ENCRYPT_ERROR_NO_AVAIL_FRAME;
1006 0 : esp_encrypt_set_next_index (b[0], node, thread_index, err,
1007 : n_noop, noop_nexts, drop_next,
1008 : current_sa_index);
1009 0 : goto trace;
1010 : }
1011 :
1012 : /* Save the frame to the list we'll submit at the end */
1013 1391 : vec_add1 (ptd->async_frames, async_frames[async_op]);
1014 : }
1015 :
1016 41899 : esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
1017 : esp, payload, payload_len, iv_sz, icv_sz,
1018 41899 : from[b - bufs], sync_next[0], hdr_len,
1019 : async_next_node, lb);
1020 : }
1021 : else
1022 180723 : esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
1023 : payload, payload_len, iv_sz, icv_sz, n_sync, b,
1024 : lb, hdr_len, esp);
1025 :
1026 222622 : vlib_buffer_advance (b[0], 0LL - hdr_len);
1027 :
1028 222622 : current_sa_packets += 1;
1029 222622 : current_sa_bytes += payload_len_total;
1030 :
1031 224669 : trace:
1032 224669 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1033 : {
1034 220858 : esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
1035 : sizeof (*tr));
1036 220858 : if (INDEX_INVALID == sa_index0)
1037 63 : clib_memset_u8 (tr, 0xff, sizeof (*tr));
1038 : else
1039 : {
1040 220795 : tr->sa_index = sa_index0;
1041 220795 : tr->spi = sa0->spi;
1042 220795 : tr->seq = sa0->seq;
1043 220795 : tr->sa_seq_hi = sa0->seq_hi;
1044 220795 : tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
1045 220795 : tr->crypto_alg = sa0->crypto_alg;
1046 220795 : tr->integ_alg = sa0->integ_alg;
1047 : }
1048 : }
1049 :
1050 : /* next */
1051 224669 : if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
1052 : {
1053 2047 : noop_bi[n_noop] = from[b - bufs];
1054 2047 : n_noop++;
1055 : }
1056 222622 : else if (!is_async)
1057 : {
1058 180723 : sync_bi[n_sync] = from[b - bufs];
1059 180723 : sync_bufs[n_sync] = b[0];
1060 180723 : n_sync++;
1061 180723 : sync_next++;
1062 : }
1063 : else
1064 : {
1065 41899 : n_async++;
1066 : }
1067 224669 : n_left -= 1;
1068 224669 : b += 1;
1069 : }
1070 :
1071 5441 : if (INDEX_INVALID != current_sa_index)
1072 5439 : vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
1073 : current_sa_index, current_sa_packets,
1074 : current_sa_bytes);
1075 5441 : if (n_sync)
1076 : {
1077 4248 : esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
1078 : drop_next);
1079 4248 : esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
1080 : sync_nexts, ptd->chunks, drop_next);
1081 :
1082 4248 : esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
1083 : drop_next);
1084 4248 : esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
1085 : sync_nexts, ptd->chunks, drop_next);
1086 :
1087 4248 : vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
1088 : }
1089 5441 : if (n_async)
1090 : {
1091 : /* submit all of the open frames */
1092 : vnet_crypto_async_frame_t **async_frame;
1093 :
1094 2546 : vec_foreach (async_frame, ptd->async_frames)
1095 : {
1096 1391 : if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
1097 : {
1098 0 : n_noop += esp_async_recycle_failed_submit (
1099 : vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
1100 : IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
1101 : noop_nexts, drop_next, true);
1102 0 : vnet_crypto_async_reset_frame (*async_frame);
1103 0 : vnet_crypto_async_free_frame (vm, *async_frame);
1104 : }
1105 : }
1106 : }
1107 5441 : if (n_noop)
1108 62 : vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
1109 :
1110 5441 : vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
1111 5441 : frame->n_vectors);
1112 :
1113 5441 : return frame->n_vectors;
1114 : }
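/* esp_encrypt_inline() sorts each buffer into one of three buckets: "noop"
 * (drops and handoffs, enqueued with their error next), "sync" (encrypted
 * in-line via vnet_crypto_process_ops()/vnet_crypto_process_chained_ops()
 * and then enqueued), and "async" (added to crypto frames submitted above
 * and finished later by the *-encrypt-post nodes). */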
1115 :
1116 : always_inline uword
1117 1391 : esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1118 : vlib_frame_t * frame)
1119 : {
1120 1391 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1121 1391 : u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
1122 1391 : u32 *from = vlib_frame_vector_args (frame);
1123 1391 : u32 n_left = frame->n_vectors;
1124 :
1125 1391 : vlib_get_buffers (vm, from, b, n_left);
1126 :
1127 1391 : if (n_left >= 4)
1128 : {
1129 701 : vlib_prefetch_buffer_header (b[0], LOAD);
1130 701 : vlib_prefetch_buffer_header (b[1], LOAD);
1131 701 : vlib_prefetch_buffer_header (b[2], LOAD);
1132 701 : vlib_prefetch_buffer_header (b[3], LOAD);
1133 : }
1134 :
1135 10105 : while (n_left > 8)
1136 : {
1137 8714 : vlib_prefetch_buffer_header (b[4], LOAD);
1138 8714 : vlib_prefetch_buffer_header (b[5], LOAD);
1139 8714 : vlib_prefetch_buffer_header (b[6], LOAD);
1140 8714 : vlib_prefetch_buffer_header (b[7], LOAD);
1141 :
1142 8714 : next[0] = (esp_post_data (b[0]))->next_index;
1143 8714 : next[1] = (esp_post_data (b[1]))->next_index;
1144 8714 : next[2] = (esp_post_data (b[2]))->next_index;
1145 8714 : next[3] = (esp_post_data (b[3]))->next_index;
1146 :
1147 8714 : if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
1148 : {
1149 0 : if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
1150 : {
1151 0 : esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
1152 : sizeof (*tr));
1153 0 : tr->next_index = next[0];
1154 : }
1155 0 : if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
1156 : {
1157 0 : esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
1158 : sizeof (*tr));
1159 0 : tr->next_index = next[1];
1160 : }
1161 0 : if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
1162 : {
1163 0 : esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
1164 : sizeof (*tr));
1165 0 : tr->next_index = next[2];
1166 : }
1167 0 : if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
1168 : {
1169 0 : esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
1170 : sizeof (*tr));
1171 0 : tr->next_index = next[3];
1172 : }
1173 : }
1174 :
1175 8714 : b += 4;
1176 8714 : next += 4;
1177 8714 : n_left -= 4;
1178 : }
1179 :
1180 8434 : while (n_left > 0)
1181 : {
1182 7043 : next[0] = (esp_post_data (b[0]))->next_index;
1183 7043 : if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1184 : {
1185 6790 : esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
1186 : sizeof (*tr));
1187 6790 : tr->next_index = next[0];
1188 : }
1189 :
1190 7043 : b += 1;
1191 7043 : next += 1;
1192 7043 : n_left -= 1;
1193 : }
1194 :
1195 1391 : vlib_node_increment_counter (vm, node->node_index,
1196 : ESP_ENCRYPT_ERROR_POST_RX_PKTS,
1197 1391 : frame->n_vectors);
1198 1391 : vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
1199 1391 : return frame->n_vectors;
1200 : }
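/* The post nodes only restore the next index saved in esp_post_data by
 * esp_prepare_async_frame() and forward the buffers; the encryption itself
 * was completed by the async crypto engine before the buffers reached this
 * node. */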
1201 :
1202 6461 : VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
1203 : vlib_node_runtime_t * node,
1204 : vlib_frame_t * from_frame)
1205 : {
1206 8450 : return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
1207 4225 : esp_encrypt_async_next.esp4_post_next);
1208 : }
1209 :
1210 : /* *INDENT-OFF* */
1211 178120 : VLIB_REGISTER_NODE (esp4_encrypt_node) = {
1212 : .name = "esp4-encrypt",
1213 : .vector_size = sizeof (u32),
1214 : .format_trace = format_esp_encrypt_trace,
1215 : .type = VLIB_NODE_TYPE_INTERNAL,
1216 :
1217 : .n_errors = ESP_ENCRYPT_N_ERROR,
1218 : .error_counters = esp_encrypt_error_counters,
1219 :
1220 : .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1221 : .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1222 : [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1223 : [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
1224 : [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
1225 : [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
1226 : [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
1227 : [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
1228 : };
1229 : /* *INDENT-ON* */
1230 :
1231 3227 : VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
1232 : vlib_node_runtime_t * node,
1233 : vlib_frame_t * from_frame)
1234 : {
1235 991 : return esp_encrypt_post_inline (vm, node, from_frame);
1236 : }
1237 :
1238 : /* *INDENT-OFF* */
1239 178120 : VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
1240 : .name = "esp4-encrypt-post",
1241 : .vector_size = sizeof (u32),
1242 : .format_trace = format_esp_post_encrypt_trace,
1243 : .type = VLIB_NODE_TYPE_INTERNAL,
1244 : .sibling_of = "esp4-encrypt",
1245 :
1246 : .n_errors = ESP_ENCRYPT_N_ERROR,
1247 : .error_counters = esp_encrypt_error_counters,
1248 : };
1249 : /* *INDENT-ON* */
1250 :
1251 3179 : VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
1252 : vlib_node_runtime_t * node,
1253 : vlib_frame_t * from_frame)
1254 : {
1255 1886 : return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
1256 943 : esp_encrypt_async_next.esp6_post_next);
1257 : }
1258 :
1259 : /* *INDENT-OFF* */
1260 178120 : VLIB_REGISTER_NODE (esp6_encrypt_node) = {
1261 : .name = "esp6-encrypt",
1262 : .vector_size = sizeof (u32),
1263 : .format_trace = format_esp_encrypt_trace,
1264 : .type = VLIB_NODE_TYPE_INTERNAL,
1265 : .sibling_of = "esp4-encrypt",
1266 :
1267 : .n_errors = ESP_ENCRYPT_N_ERROR,
1268 : .error_counters = esp_encrypt_error_counters,
1269 : };
1270 : /* *INDENT-ON* */
1271 :
1272 2636 : VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
1273 : vlib_node_runtime_t * node,
1274 : vlib_frame_t * from_frame)
1275 : {
1276 400 : return esp_encrypt_post_inline (vm, node, from_frame);
1277 : }
1278 :
1279 : /* *INDENT-OFF* */
1280 178120 : VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
1281 : .name = "esp6-encrypt-post",
1282 : .vector_size = sizeof (u32),
1283 : .format_trace = format_esp_post_encrypt_trace,
1284 : .type = VLIB_NODE_TYPE_INTERNAL,
1285 : .sibling_of = "esp4-encrypt",
1286 :
1287 : .n_errors = ESP_ENCRYPT_N_ERROR,
1288 : .error_counters = esp_encrypt_error_counters,
1289 : };
1290 : /* *INDENT-ON* */
1291 :
1292 2437 : VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
1293 : vlib_node_runtime_t * node,
1294 : vlib_frame_t * from_frame)
1295 : {
1296 402 : return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
1297 201 : esp_encrypt_async_next.esp4_tun_post_next);
1298 : }
1299 :
1300 : /* *INDENT-OFF* */
1301 178120 : VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
1302 : .name = "esp4-encrypt-tun",
1303 : .vector_size = sizeof (u32),
1304 : .format_trace = format_esp_encrypt_trace,
1305 : .type = VLIB_NODE_TYPE_INTERNAL,
1306 :
1307 : .n_errors = ESP_ENCRYPT_N_ERROR,
1308 : .error_counters = esp_encrypt_error_counters,
1309 :
1310 : .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1311 : .next_nodes = {
1312 : [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1313 : [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1314 : [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
1315 : [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
1316 : [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
1317 : [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
1318 : [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
1319 : },
1320 : };
1321 :
1322 2236 : VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
1323 : vlib_node_runtime_t * node,
1324 : vlib_frame_t * from_frame)
1325 : {
1326 0 : return esp_encrypt_post_inline (vm, node, from_frame);
1327 : }
1328 :
1329 : /* *INDENT-OFF* */
1330 178120 : VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
1331 : .name = "esp4-encrypt-tun-post",
1332 : .vector_size = sizeof (u32),
1333 : .format_trace = format_esp_post_encrypt_trace,
1334 : .type = VLIB_NODE_TYPE_INTERNAL,
1335 : .sibling_of = "esp4-encrypt-tun",
1336 :
1337 : .n_errors = ESP_ENCRYPT_N_ERROR,
1338 : .error_counters = esp_encrypt_error_counters,
1339 : };
1340 : /* *INDENT-ON* */
1341 :
1342 2306 : VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
1343 : vlib_node_runtime_t * node,
1344 : vlib_frame_t * from_frame)
1345 : {
1346 140 : return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
1347 70 : esp_encrypt_async_next.esp6_tun_post_next);
1348 : }
1349 :
1350 : /* *INDENT-OFF* */
1351 178120 : VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
1352 : .name = "esp6-encrypt-tun",
1353 : .vector_size = sizeof (u32),
1354 : .format_trace = format_esp_encrypt_trace,
1355 : .type = VLIB_NODE_TYPE_INTERNAL,
1356 :
1357 : .n_errors = ESP_ENCRYPT_N_ERROR,
1358 : .error_counters = esp_encrypt_error_counters,
1359 :
1360 : .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1361 : .next_nodes = {
1362 : [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1363 : [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1364 : [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
1365 : [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
1366 : [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
1367 : [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
1368 : [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
1369 : },
1370 : };
1371 :
1372 : /* *INDENT-ON* */
1373 :
1374 2236 : VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
1375 : vlib_node_runtime_t * node,
1376 : vlib_frame_t * from_frame)
1377 : {
1378 0 : return esp_encrypt_post_inline (vm, node, from_frame);
1379 : }
1380 :
1381 : /* *INDENT-OFF* */
1382 178120 : VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
1383 : .name = "esp6-encrypt-tun-post",
1384 : .vector_size = sizeof (u32),
1385 : .format_trace = format_esp_post_encrypt_trace,
1386 : .type = VLIB_NODE_TYPE_INTERNAL,
1387 : .sibling_of = "esp-mpls-encrypt-tun",
1388 :
1389 : .n_errors = ESP_ENCRYPT_N_ERROR,
1390 : .error_counters = esp_encrypt_error_counters,
1391 : };
1392 : /* *INDENT-ON* */
1393 :
1394 2238 : VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
1395 : (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
1396 : {
1397 4 : return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
1398 2 : esp_encrypt_async_next.esp_mpls_tun_post_next);
1399 : }
1400 :
1401 178120 : VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
1402 : .name = "esp-mpls-encrypt-tun",
1403 : .vector_size = sizeof (u32),
1404 : .format_trace = format_esp_encrypt_trace,
1405 : .type = VLIB_NODE_TYPE_INTERNAL,
1406 :
1407 : .n_errors = ESP_ENCRYPT_N_ERROR,
1408 : .error_counters = esp_encrypt_error_counters,
1409 :
1410 : .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1411 : .next_nodes = {
1412 : [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1413 : [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1414 : [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
1415 : [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
1416 : [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
1417 : [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
1418 : [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
1419 : },
1420 : };
1421 :
1422 2236 : VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
1423 : (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
1424 : {
1425 0 : return esp_encrypt_post_inline (vm, node, from_frame);
1426 : }
1427 :
1428 178120 : VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
1429 : .name = "esp-mpls-encrypt-tun-post",
1430 : .vector_size = sizeof (u32),
1431 : .format_trace = format_esp_post_encrypt_trace,
1432 : .type = VLIB_NODE_TYPE_INTERNAL,
1433 : .sibling_of = "esp-mpls-encrypt-tun",
1434 :
1435 : .n_errors = ESP_ENCRYPT_N_ERROR,
1436 : .error_counters = esp_encrypt_error_counters,
1437 : };
1438 :
1439 : #ifndef CLIB_MARCH_VARIANT
1440 :
1441 : static clib_error_t *
1442 559 : esp_encrypt_init (vlib_main_t *vm)
1443 : {
1444 559 : ipsec_main_t *im = &ipsec_main;
1445 :
1446 559 : im->esp4_enc_fq_index =
1447 559 : vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
1448 559 : im->esp6_enc_fq_index =
1449 559 : vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
1450 559 : im->esp4_enc_tun_fq_index =
1451 559 : vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
1452 559 : im->esp6_enc_tun_fq_index =
1453 559 : vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
1454 559 : im->esp_mpls_enc_tun_fq_index =
1455 559 : vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);
1456 :
1457 559 : return 0;
1458 : }
1459 :
1460 53759 : VLIB_INIT_FUNCTION (esp_encrypt_init);
1461 :
1462 : #endif
1463 :
1464 : /*
1465 : * fd.io coding-style-patch-verification: ON
1466 : *
1467 : * Local Variables:
1468 : * eval: (c-set-style "gnu")
1469 : * End:
1470 : */