/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>

#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;

typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s,
	      "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
	      t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
	      format_ipsec_crypto_alg, t->crypto_alg,
	      format_ipsec_integ_alg, t->integ_alg,
	      t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}

static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}

/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
			u8 icv_sz, vlib_node_runtime_t *node,
			u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };
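
  /* Illustrative example (not from the original source): with esp_align 16,
   * icv_sz 16 and total_len 100, min_length = 100 + 2 = 102, new_length =
   * round_pow2 (102, 16) = 112, so pad_bytes = 10 and the appended tail is
   * tail_sz = 2 + 10 + 16 = 28 bytes (footer + padding + ICV). */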
  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
				      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_data + last[0]->current_length + tail_sz >
      buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
	return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}

static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;
  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol, prot, ip4_header_t,
			    protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}

static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}

static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };
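
  /* The return below splats nexthdr into all 16 lanes and compares them
   * against the table above in one vector operation; any equal lane makes
   * the result non-zero. Note that hop-by-hop is protocol 0, so it also
   * matches the zero padding lanes, which is harmless. */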
  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
	  !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}

static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will come before
     the others; if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if the packet doesn't have any pre-ESP extension headers */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = ip6_next_header (ip6);
  len += ip6_ext_header_len (p);
  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      len += ip6_ext_header_len (p);
      p = ip6_ext_next_header (p);
    }

  *ext_hdr = p;
  return len;
}

/* IPsec IV generation: IV requirements differ depending on the
 * encryption mode: IVs must be unpredictable for AES-CBC, whereas they can
 * be predictable but must never be reused with the same key material
 * for CTR and GCM.
 * To avoid reusing the same IVs between multiple VPP instances and between
 * restarts, we use a properly chosen PRNG to generate IVs. To ensure the IV
 * is unpredictable for CBC, it is then encrypted using the same key as the
 * message. Refer to NIST SP 800-38A and NIST SP 800-38D for more
 * details. */
static_always_inline void *
esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
{
  ASSERT (iv_sz >= sizeof (u64));
  u64 *iv = (u64 *) (payload - iv_sz);
  clib_memset_u8 (iv, 0, iv_sz);
  *iv = clib_pcg64i_random_r (&sa->iv_prng);
  return iv;
}
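
/* Illustrative IV layout (not from the original source): esp_generate_iv ()
 * above zeroes iv_sz bytes directly in front of the payload and overwrites
 * the first 8 of them with one PCG64 output, e.g. for AES-CBC (iv_sz 16)
 * IV = [ 8 random bytes | 8 zero bytes ], while for CTR/GCM (iv_sz 8) the
 * whole IV is the random 64-bit value. */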

static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks,
			 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
				      ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
				      bi, nexts, drop_next,
				      vnet_buffer (b[bi])->ipsec.sad_index);
	  n_fail--;
	}
      op++;
    }
}

static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
				      ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
				      bi, nexts, drop_next,
				      vnet_buffer (b[bi])->ipsec.sad_index);
	  n_fail--;
	}
      op++;
    }
}

static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  ipsec_sa_t * sa0, vlib_buffer_t * b,
			  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			  u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	total_len += ch->len = cb->current_length - icv_sz;
      else
	total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
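
/* Illustrative chunk list (not from the original source) built by
 * esp_encrypt_chain_crypto () above for a chain b0 -> b1 -> b2 (b2 == lb):
 *   { start, start_len }, { b1 data, b1 len }, { b2 data, b2 len - icv_sz }
 * The src and dst pointers of each chunk are identical, i.e. the cipher
 * runs in place, and the ICV bytes at the tail of the last buffer are left
 * out of the ciphertext so the engine can write the tag there. */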

static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 ipsec_sa_t * sa0, vlib_buffer_t * b,
			 vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			 u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	{
	  total_len += ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
	      ch->len += sizeof (seq_hi);
	      total_len += sizeof (seq_hi);
	    }
	}
      else
	total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
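
/* Note on ESN above: only the low 32 bits of the sequence number travel in
 * the ESP header, so for SAs with extended sequence numbers the high 32
 * bits (seq_hi) are appended, in network byte order, to the data covered by
 * the integrity check (RFC 4303, section 2.2.1). They are scratched into
 * the bytes where the ICV will later be written, which is safe because the
 * digest is computed before it is stored. */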

always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
		     vnet_crypto_op_t **crypto_ops,
		     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
		     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
		     vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
		     esp_header_t *esp)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
      u8 *crypto_start = payload;
      /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we
       * always have enough space for the ESP header and footer, which
       * includes the ICV */
      ASSERT (payload_len > icv_sz);
      u16 crypto_len = payload_len - icv_sz;

      /* generate the IV in front of the payload */
      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);

      op->key_index = sa0->crypto_key_index;
      op->user_data = bi;

      if (ipsec_sa_is_set_IS_CTR (sa0))
	{
	  /* construct nonce in a scratch space in front of the IP header */
	  esp_ctr_nonce_t *nonce =
	    (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /* construct aad in a scratch space in front of the nonce */
	      op->aad = (u8 *) nonce - sizeof (esp_aead_t);
	      op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
	      op->tag = payload + crypto_len;
	      op->tag_len = 16;
	      if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
		{
		  /* RFC 4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
		  crypto_start -= iv_sz;
		  crypto_len += iv_sz;
		}
	    }
	  else
	    {
	      nonce->ctr = clib_host_to_net_u32 (1);
	    }

	  nonce->salt = sa0->salt;
	  nonce->iv = *(u64 *) pkt_iv;
	  op->iv = (u8 *) nonce;
	}
      else
	{
	  /* construct zero iv in front of the IP header */
	  op->iv = pkt_iv - hdr_len - iv_sz;
	  clib_memset_u8 (op->iv, 0, iv_sz);
	  /* include iv field in crypto */
	  crypto_start -= iv_sz;
	  crypto_len += iv_sz;
	}

      if (PREDICT_FALSE (lb != b[0]))
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
				    crypto_start, crypto_len + icv_sz,
				    &op->n_chunks);
	}
      else
	{
	  /* not chained */
	  op->src = op->dst = crypto_start;
	  op->len = crypto_len;
	}
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = bi;

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->digest = vlib_buffer_get_tail (lb) - icv_sz;

	  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
				   payload - iv_sz - sizeof (esp_header_t),
				   payload_len + iv_sz +
				   sizeof (esp_header_t), op->digest,
				   &op->n_chunks);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa0))
	{
	  u32 tmp = clib_net_to_host_u32 (seq_hi);
	  clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
	  op->len += sizeof (seq_hi);
	}
    }
}
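
/* Rough scratch-space sketch for the sync AEAD case (not from the original
 * source; exact offsets depend on iv_sz and hdr_len):
 *
 *   headroom ... [aad][nonce][rewritten headers][IV][payload][pad][ftr][ICV]
 *
 * The AAD and nonce are built in otherwise unused buffer headroom in front
 * of the prepended headers, so no per-packet allocation is needed; they are
 * consumed by the crypto engine and never transmitted. */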

static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
			 vnet_crypto_async_frame_t *async_frame,
			 ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
			 u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
			 u32 bi, u16 next, u32 hdr_len, u16 async_next,
			 vlib_buffer_t *lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  const u32 key_index = sa->crypto_key_index;
  i16 crypto_start_offset, integ_start_offset;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  /* crypto */
  crypto_start_offset = integ_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  /* generate the IV in front of the payload */
  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);

  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
	(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa))
	{
	  /* construct aad in a scratch space in front of the nonce */
	  aad = (u8 *) nonce - sizeof (esp_aead_t);
	  esp_aad_fill (aad, esp, sa, sa->seq_hi);
	  if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
	    {
	      /* RFC 4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
	      crypto_start_offset -= iv_sz;
	      crypto_total_len += iv_sz;
	    }
	}
      else
	{
	  nonce->ctr = clib_host_to_net_u32 (1);
	}

      nonce->salt = sa->salt;
      nonce->iv = *(u64 *) pkt_iv;
      iv = (u8 *) nonce;
    }
  else
    {
      /* construct zero iv in front of the IP header */
      iv = pkt_iv - hdr_len - iv_sz;
      clib_memset_u8 (iv, 0, iv_sz);
      /* include iv field in crypto */
      crypto_start_offset -= iv_sz;
      crypto_total_len += iv_sz;
    }

  if (lb != b)
    {
      /* chain */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      crypto_total_len = esp_encrypt_chain_crypto (
	vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
	crypto_total_len + icv_sz, 0);
    }

  if (sa->integ_op_id)
    {
      integ_start_offset -= iv_sz + sizeof (esp_header_t);
      integ_total_len += iv_sz + sizeof (esp_header_t);

      if (b != lb)
	{
	  integ_total_len = esp_encrypt_chain_integ (
	    vm, ptd, sa, b, lb, icv_sz,
	    payload - iv_sz - sizeof (esp_header_t),
	    payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa))
	{
	  u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
	  clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
	  integ_total_len += sizeof (seq_hi);
	}
    }

  /* this always succeeds because we know the frame is not full */
  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
				  integ_total_len - crypto_total_len,
				  crypto_start_offset, integ_start_offset, bi,
				  async_next, iv, tag, aad, flag);
}
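
/* Note: unlike the sync path, which stores raw pointers in
 * vnet_crypto_op_t, the async frame records buffer-relative offsets
 * (payload - b->data) together with the buffer index, so the descriptors
 * stay valid until the frame is dequeued later, possibly by a crypto
 * worker on another thread. */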

always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
		    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
      ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_HANDOFF6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
      ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 n_async = 0;
  u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_encrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (async_frames, 0, sizeof (async_frames));

  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      err = ESP_ENCRYPT_ERROR_RX_PKTS;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  clib_prefetch_load (p);
	  p -= CLIB_CACHE_LINE_BYTES;
	  clib_prefetch_load (p);
	  /* speculate that the trailer goes in the first buffer */
	  CLIB_PREFETCH (vlib_buffer_get_tail (b[1]), CLIB_CACHE_LINE_BYTES,
			 LOAD);
	}

      if (is_tun)
	{
	  /* we are on an ipsec tunnel's feature arc */
	  vnet_buffer (b[0])->ipsec.sad_index = sa_index0 =
	    ipsec_tun_protect_get_sa_out (
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

	  if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
	    {
	      err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
	      noop_nexts[n_noop] = drop_next;
	      b[0]->error = node->errors[err];
	      goto trace;
	    }
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;

      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (
	      &ipsec_sa_counters, thread_index, current_sa_index,
	      current_sa_packets, current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = ipsec_sa_get (sa_index0);

	  if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
			      sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
			     !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
	    {
	      err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
	      esp_encrypt_set_next_index (b[0], node, thread_index, err,
					  n_noop, noop_nexts, drop_next,
					  sa_index0);
	      goto trace;
	    }
	  current_sa_index = sa_index0;
	  vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
					  current_sa_index);

	  /* fetch the second cacheline ASAP */
	  clib_prefetch_load (sa0->cacheline1);

	  spi = clib_net_to_host_u32 (sa0->spi);
	  esp_align = sa0->esp_block_align;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;
	  is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
	}

      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}
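
      /* If two threads race on the claim above, exactly one CAS wins; the
       * loser observes a foreign thread_index in the check below and hands
       * the packet off, so a given SA is only ever processed by a single
       * thread. */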
      if (PREDICT_FALSE (thread_index != sa0->thread_index))
	{
	  vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
	  err = ESP_ENCRYPT_ERROR_HANDOFF;
	  esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
				      noop_nexts, handoff_next,
				      current_sa_index);
	  goto trace;
	}

      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	  esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
				      noop_nexts, drop_next,
				      current_sa_index);
	  goto trace;
	}

      if (n_bufs > 1)
	{
	  /* find last buffer in the chain */
	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    lb = vlib_get_buffer (vm, lb->next_buffer);
	}

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
	  esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
				      noop_nexts, drop_next,
				      current_sa_index);
	  goto trace;
	}

      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (
	    vm, &lb, esp_align, icv_sz, node, buffer_data_size,
	    vlib_buffer_length_in_chain (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	      esp_encrypt_set_next_index (b[0], node, thread_index, err,
					  n_noop, noop_nexts, drop_next,
					  current_sa_index);
	      goto trace;
	    }
	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len_total + hdr_len);
	    }

	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));

	      if (VNET_LINK_IP6 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
					  (const ip6_header_t *) payload,
					  ip6);
		}
	      else if (VNET_LINK_IP4 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
					  (const ip4_header_t *) payload,
					  ip6);
		}
	      else if (VNET_LINK_MPLS == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
		  tunnel_encap_fixup_mplso6 (
		    sa0->tunnel_flags, b[0],
		    (const mpls_unicast_header_t *) payload, ip6);
		}
	      else
		ASSERT (0);

	      len = payload_len_total + hdr_len - len;
	      ip6->payload_length = clib_net_to_host_u16 (len);
	      b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));

	      if (VNET_LINK_IP6 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o4_w_chksum (
		    sa0->tunnel_flags, (const ip6_header_t *) payload, ip4);
		}
	      else if (VNET_LINK_IP4 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o4_w_chksum (
		    sa0->tunnel_flags, (const ip4_header_t *) payload, ip4);
		}
	      else if (VNET_LINK_MPLS == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
		  tunnel_encap_fixup_mplso4_w_chksum (
		    sa0->tunnel_flags,
		    (const mpls_unicast_header_t *) payload, ip4);
		}
	      else
		ASSERT (0);

	      len = payload_len_total + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }

	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      sync_next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	  else
	    sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	  b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	}
      else /* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr;
	  u16 ip_len;
	  ip6_ext_header_t *ext_hdr;
	  udp_header_t *udp = 0;
	  u16 udp_len = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  /*
	   * Get extension header chain length. It might be longer than the
	   * buffer's pre_data area.
	   */
	  ip_len =
	    (VNET_LINK_IP6 == lt ?
	       esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
	       ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
	  if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
	    {
	      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	      esp_encrypt_set_next_index (b[0], node, thread_index, err,
					  n_noop, noop_nexts, drop_next,
					  current_sa_index);
	      goto trace;
	    }

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (
	    vm, &lb, esp_align, icv_sz, node, buffer_data_size,
	    vlib_buffer_length_in_chain (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	      esp_encrypt_set_next_index (b[0], node, thread_index, err,
					  n_noop, noop_nexts, drop_next,
					  current_sa_index);
	      goto trace;
	    }

	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;

	  /* L2 header */
	  if (!is_tun)
	    {
	      l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	      hdr_len += l2_len;
	      l2_hdr = payload - hdr_len;

	      /* copy l2 and ip header */
	      clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	    }
	  else
	    l2_len = 0;

	  u16 len;
	  len = payload_len_total + hdr_len - l2_len;

	  if (VNET_LINK_IP6 == lt)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
	      if (PREDICT_TRUE (NULL == ext_hdr))
		{
		  *next_hdr_ptr = ip6->protocol;
		  ip6->protocol =
		    (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
		}
	      else
		{
		  *next_hdr_ptr = ext_hdr->next_hdr;
		  ext_hdr->next_hdr =
		    (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
		}
	      ip6->payload_length =
		clib_host_to_net_u16 (len - sizeof (ip6_header_t));
	    }
	  else if (VNET_LINK_IP4 == lt)
	    {
	      ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
				  (udp != NULL));
	    }

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (udp)
	    {
	      udp_len = len - ip_len;
	      esp_fill_udp_hdr (sa0, udp, udp_len);
	    }

	  sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}

      if (lb != b[0])
	{
	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
	{
	  async_op = sa0->crypto_async_enc_op_id;

	  /* get a frame for this op if we don't yet have one or it's full */
	  if (NULL == async_frames[async_op] ||
	      vnet_crypto_async_frame_is_full (async_frames[async_op]))
	    {
	      async_frames[async_op] =
		vnet_crypto_async_get_frame (vm, async_op);

	      if (PREDICT_FALSE (!async_frames[async_op]))
		{
		  err = ESP_ENCRYPT_ERROR_NO_AVAIL_FRAME;
		  esp_encrypt_set_next_index (b[0], node, thread_index, err,
					      n_noop, noop_nexts, drop_next,
					      current_sa_index);
		  goto trace;
		}

	      /* Save the frame to the list we'll submit at the end */
	      vec_add1 (ptd->async_frames, async_frames[async_op]);
	    }

	  esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
				   esp, payload, payload_len, iv_sz, icv_sz,
				   from[b - bufs], sync_next[0], hdr_len,
				   async_next_node, lb);
	}
      else
	esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
			     payload, payload_len, iv_sz, icv_sz, n_sync, b,
			     lb, hdr_len, esp);

      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr =
	    vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  if (INDEX_INVALID == sa_index0)
	    clib_memset_u8 (tr, 0xff, sizeof (*tr));
	  else
	    {
	      tr->sa_index = sa_index0;
	      tr->spi = sa0->spi;
	      tr->seq = sa0->seq;
	      tr->sa_seq_hi = sa0->seq_hi;
	      tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	      tr->crypto_alg = sa0->crypto_alg;
	      tr->integ_alg = sa0->integ_alg;
	    }
	}

      /* next */
      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
	{
	  noop_bi[n_noop] = from[b - bufs];
	  n_noop++;
	}
      else if (!is_async)
	{
	  sync_bi[n_sync] = from[b - bufs];
	  sync_bufs[n_sync] = b[0];
	  n_sync++;
	  sync_next++;
	}
      else
	{
	  n_async++;
	}
      n_left -= 1;
      b += 1;
    }
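
  /* At this point every packet has one of three dispositions: noop (drop
   * or handoff, enqueued below), sync (encrypted in-line by the ops
   * processed below) or async (owned by a crypto frame and enqueued later
   * by the corresponding post node). */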
  if (INDEX_INVALID != current_sa_index)
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				     current_sa_index, current_sa_packets,
				     current_sa_bytes);
  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
		       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
			       sync_nexts, ptd->chunks, drop_next);

      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
		       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
			       sync_nexts, ptd->chunks, drop_next);

      vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
    }
  if (n_async)
    {
      /* submit all of the open frames */
      vnet_crypto_async_frame_t **async_frame;

      vec_foreach (async_frame, ptd->async_frames)
	{
	  if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
	    {
	      n_noop += esp_async_recycle_failed_submit (
		vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
		IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
		noop_nexts, drop_next, true);
	      vnet_crypto_async_reset_frame (*async_frame);
	      vnet_crypto_async_free_frame (vm, *async_frame);
	    }
	}
    }
  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
			       frame->n_vectors);

  return frame->n_vectors;
}

always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }
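
  /* Quad loop: handle four buffers per iteration while prefetching the
   * next four buffer headers to hide memory latency; the single-buffer
   * loop below mops up the remainder. */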
  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr =
		vlib_add_trace (vm, node, b[0], sizeof (*tr));
	      tr->next_index = next[0];
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr =
		vlib_add_trace (vm, node, b[1], sizeof (*tr));
	      tr->next_index = next[1];
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr =
		vlib_add_trace (vm, node, b[2], sizeof (*tr));
	      tr->next_index = next[2];
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr =
		vlib_add_trace (vm, node, b[3], sizeof (*tr));
	      tr->next_index = next[3];
	    }
	}

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_post_trace_t *tr =
	    vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  tr->next_index = next[0];
	}

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_POST_RX_PKTS,
			       frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}

VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
			     esp_encrypt_async_next.esp4_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
		  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
		  [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
		  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
		  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
		  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
		  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
			     esp_encrypt_async_next.esp6_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
			     esp_encrypt_async_next.esp4_tun_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
			     esp_encrypt_async_next.esp6_tun_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
			     esp_encrypt_async_next.esp_mpls_tun_post_next);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};

#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_encrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_enc_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
  im->esp6_enc_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
  im->esp4_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
  im->esp6_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
  im->esp_mpls_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_encrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */