/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
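/* 15 round keys of 16 bytes each covers the AES-256 key schedule */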
#define EXPANDED_KEY_N_BYTES (16 * 15)

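/* Per-thread state: each worker owns a multi-buffer manager and, with the
 * burst API, a preallocated array of job descriptors. */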
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  MB_MGR *mgr;
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
  JOB_AES_HMAC burst_jobs[IMB_MAX_BURST_SIZE];
#endif
} ipsecmb_per_thread_data_t;

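/* Per-algorithm data: key-material size plus the MB_MGR helper functions
 * used by the key handler, captured at init time. */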
typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

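/* AES round keys, expanded once at key-add time so the data path never
 * runs the key schedule. */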
typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op \
  _(SHA1, SHA1, sha1, 64, 20, 20) \
  _(SHA224, SHA_224, sha224, 64, 32, 28) \
  _(SHA256, SHA_256, sha256, 64, 32, 32) \
  _(SHA384, SHA_384, sha384, 128, 64, 48) \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/*
 * (Alg, key-len-bits, JOB_CIPHER_MODE)
 */
#define foreach_ipsecmb_cipher_op \
  _ (AES_128_CBC, 128, CBC) \
  _ (AES_192_CBC, 192, CBC) \
  _ (AES_256_CBC, 256, CBC) \
  _ (AES_128_CTR, 128, CNTR) \
  _ (AES_192_CTR, 192, CNTR) \
  _ (AES_256_CTR, 256, CNTR)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op \
  _(AES_128_GCM, 128) \
  _(AES_192_GCM, 192) \
  _(AES_256_GCM, 256)

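/* Map an intel-ipsec-mb job status onto the vnet crypto op status */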
static_always_inline vnet_crypto_op_status_t
ipsecmb_status_job (JOB_STS status)
{
  switch (status)
    {
    case STS_COMPLETED:
      return VNET_CRYPTO_OP_STATUS_COMPLETED;
    case STS_BEING_PROCESSED:
    case STS_COMPLETED_AES:
    case STS_COMPLETED_HMAC:
      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
    case STS_INVALID_ARGS:
    case STS_INTERNAL_ERROR:
    case STS_ERROR:
      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
    }
  ASSERT (0);
  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
}

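/* Retire one HMAC job: on verify ops compare the computed tag against
 * op->digest, otherwise copy the (possibly truncated) digest out. */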
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

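/* Library versions >= 1.3 provide the burst API, which submits a whole
 * array of jobs at once; older versions use the per-job submit/flush API. */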
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         u32 block_size, u32 hash_size, u32 digest_size,
                         JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0, ops_index = 0;
  u8 scratch[n_ops][digest_size];
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
      /*
       * configure all the jobs first ...
       */
      for (i = 0; i < n; i++, ops_index++)
        {
          vnet_crypto_op_t *op = ops[ops_index];
          const u8 *kd = (u8 *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->hash_start_src_offset_in_bytes = 0;
          job->msg_len_to_hash_in_bytes = op->len;
          job->auth_tag_output_len_in_bytes = digest_size;
          job->auth_tag_output = scratch[ops_index];

          job->u.HMAC._hashed_auth_key_xor_ipad = kd;
          job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
          job->user_data = op;
        }

      /*
       * submit all jobs to be processed and retire completed jobs
       */
      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);

      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         u32 block_size, u32 hash_size, u32 digest_size,
                         JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}
#endif

#define _(a, b, c, d, e, f) \
static_always_inline u32 \
ipsecmb_ops_hmac_##a (vlib_main_t * vm, \
                      vnet_crypto_op_t * ops[], \
                      u32 n_ops) \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); } \

foreach_ipsecmb_hmac_op;
#undef _

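/* Retire one cipher job; unlike HMAC there is no tag to verify */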
always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0, ops_index = 0;
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;

      for (i = 0; i < n; i++)
        {
          ipsecmb_aes_key_data_t *kd;
          vnet_crypto_op_t *op = ops[ops_index++];
          kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

          job = &ptd->burst_jobs[i];

          job->src = op->src;
          job->dst = op->dst;
          job->msg_len_to_cipher_in_bytes = op->len;
          job->cipher_start_src_offset_in_bytes = 0;

          job->hash_alg = NULL_HASH;

          job->aes_enc_key_expanded = kd->enc_key_exp;
          job->aes_dec_key_expanded = kd->dec_key_exp;
          job->iv = op->iv;
          job->iv_len_in_bytes = AES_BLOCK_SIZE;

          job->user_data = op;
        }

      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
                                       cipher_mode, direction, key_len / 8);
      for (i = 0; i < n; i++)
        {
          job = &ptd->burst_jobs[i];
          ipsecmb_retire_cipher_job (job, &n_fail);
        }

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction,
                               JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = cipher_mode;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}
#endif

#define _(a, b, c) \
  static_always_inline u32 ipsecmb_ops_cipher_enc_##a ( \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
  { \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c); \
  } \
\
  static_always_inline u32 ipsecmb_ops_cipher_dec_##a ( \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
  { \
    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c); \
  }

foreach_ipsecmb_cipher_op;
#undef _

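/* AES-GCM uses the library's direct API rather than the job manager; the
 * _chained variants walk the chunk list with init/update/finalize calls. */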
#define _(a, b) \
static_always_inline u32 \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm, \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
{ \
  ipsecmb_main_t *imbm = &ipsecmb_main; \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                     vm->thread_index); \
  MB_MGR *m = ptd->mgr; \
  vnet_crypto_op_chunk_t *chp; \
  u32 i, j; \
\
  for (i = 0; i < n_ops; i++) \
    { \
      struct gcm_key_data *kd; \
      struct gcm_context_data ctx; \
      vnet_crypto_op_t *op = ops[i]; \
\
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
      IMB_AES##b##_GCM_INIT (m, kd, &ctx, op->iv, op->aad, op->aad_len); \
      chp = chunks + op->chunk_index; \
      for (j = 0; j < op->n_chunks; j++) \
        { \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
                                       chp->len); \
          chp += 1; \
        } \
      IMB_AES##b##_GCM_ENC_FINALIZE (m, kd, &ctx, op->tag, op->tag_len); \
\
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
    } \
\
  return n_ops; \
} \
\
static_always_inline u32 \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
                                u32 n_ops) \
{ \
  ipsecmb_main_t *imbm = &ipsecmb_main; \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                     vm->thread_index); \
  MB_MGR *m = ptd->mgr; \
  u32 i; \
\
  for (i = 0; i < n_ops; i++) \
    { \
      struct gcm_key_data *kd; \
      struct gcm_context_data ctx; \
      vnet_crypto_op_t *op = ops[i]; \
\
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
                            op->aad, op->aad_len, op->tag, op->tag_len); \
\
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
    } \
\
  return n_ops; \
} \
\
static_always_inline u32 \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm, \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
{ \
  ipsecmb_main_t *imbm = &ipsecmb_main; \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                     vm->thread_index); \
  MB_MGR *m = ptd->mgr; \
  vnet_crypto_op_chunk_t *chp; \
  u32 i, j, n_failed = 0; \
\
  for (i = 0; i < n_ops; i++) \
    { \
      struct gcm_key_data *kd; \
      struct gcm_context_data ctx; \
      vnet_crypto_op_t *op = ops[i]; \
      u8 scratch[64]; \
\
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
      IMB_AES##b##_GCM_INIT (m, kd, &ctx, op->iv, op->aad, op->aad_len); \
      chp = chunks + op->chunk_index; \
      for (j = 0; j < op->n_chunks; j++) \
        { \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
                                       chp->len); \
          chp += 1; \
        } \
      IMB_AES##b##_GCM_DEC_FINALIZE (m, kd, &ctx, scratch, op->tag_len); \
\
      if ((memcmp (op->tag, scratch, op->tag_len))) \
        { \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
          n_failed++; \
        } \
      else \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
    } \
\
  return n_ops - n_failed; \
} \
\
static_always_inline u32 \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
                                u32 n_ops) \
{ \
  ipsecmb_main_t *imbm = &ipsecmb_main; \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                     vm->thread_index); \
  MB_MGR *m = ptd->mgr; \
  u32 i, n_failed = 0; \
\
  for (i = 0; i < n_ops; i++) \
    { \
      struct gcm_key_data *kd; \
      struct gcm_context_data ctx; \
      vnet_crypto_op_t *op = ops[i]; \
      u8 scratch[64]; \
\
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
                            op->aad, op->aad_len, scratch, op->tag_len); \
\
      if ((memcmp (op->tag, scratch, op->tag_len))) \
        { \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
          n_failed++; \
        } \
      else \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
    } \
\
  return n_ops - n_failed; \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _

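/* CHACHA20-POLY1305 support depends on the library version the plugin is
 * built against, hence the HAVE_IPSECMB_CHACHA_POLY guard. */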
#ifdef HAVE_IPSECMB_CHACHA_POLY
always_inline void
ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->tag_len;

  if (PREDICT_FALSE (STS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if (memcmp (op->tag, job->auth_tag_output, len))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }

  clib_memcpy_fast (op->tag, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                         IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  struct IMB_JOB *job;
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 scratch[VLIB_FRAME_SIZE][16];
  u8 *key = 0;

  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];

      job = IMB_GET_NEXT_JOB (m);
      if (last_key_index != op->key_index)
        {
          vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

          key = kd->data;
          last_key_index = op->key_index;
        }

      job->cipher_direction = dir;
      job->chain_order = IMB_ORDER_HASH_CIPHER;
      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
      job->enc_keys = job->dec_keys = key;
      job->key_len_in_bytes = 32;

      job->u.CHACHA20_POLY1305.aad = op->aad;
      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
      job->src = op->src;
      job->dst = op->dst;

      job->iv = op->iv;
      job->iv_len_in_bytes = 12;
      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
        op->len;
      job->cipher_start_src_offset_in_bytes =
        job->hash_start_src_offset_in_bytes = 0;

      job->auth_tag_output = scratch[i];
      job->auth_tag_output_len_in_bytes = 16;

      job->user_data = op;

      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
      if (job)
        ipsecmb_retire_aead_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_aead_job (job, &n_fail);

  return n_ops - n_fail;
}

static_always_inline u32
ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                             u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
                                 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  MB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 *key = 0;

  if (dir == IMB_DIR_ENCRYPT)
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);

          op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }
  else /* dir == IMB_DIR_DECRYPT */
    {
      for (i = 0; i < n_ops; i++)
        {
          vnet_crypto_op_t *op = ops[i];
          struct chacha20_poly1305_context_data ctx;
          vnet_crypto_op_chunk_t *chp;
          u8 scratch[16];
          u32 j;

          ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

          if (last_key_index != op->key_index)
            {
              vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

              key = kd->data;
              last_key_index = op->key_index;
            }

          IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
                                      op->aad_len);

          chp = chunks + op->chunk_index;
          for (j = 0; j < op->n_chunks; j++)
            {
              IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
                                                chp->src, chp->len);
              chp += 1;
            }

          IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);

          if (memcmp (op->tag, scratch, op->tag_len))
            {
              n_fail = n_fail + 1;
              op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
            }
          else
            op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
        }
    }

  return n_ops - n_fail;
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                      vnet_crypto_op_chunk_t *chunks,
                                      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
                                          IMB_DIR_DECRYPT);
}
#endif

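/* Key handler: on add/modify, precompute per-key material (AES round keys,
 * GCM key data, HMAC pad hashes); on delete, free it. */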
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /** TODO: add linked alg support **/
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC/CTR key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
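  /* Precompute the inner and outer hash states over (key XOR ipad) and
   * (key XOR opad) per RFC 2104, so per-packet jobs skip this step. */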
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
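  /* Initialize each per-thread manager with the widest SIMD variant the
   * CPU supports. */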
  vec_foreach (ptd, imbm->per_thread_data)
    {
      ptd->mgr = alloc_mb_mgr (0);
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
      clib_memset_u8 (ptd->burst_jobs, 0,
                      sizeof (JOB_AES_HMAC) * IMB_MAX_BURST_SIZE);
#endif
      if (clib_cpu_supports_avx512f ())
        init_mb_mgr_avx512 (ptd->mgr);
      else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
        init_mb_mgr_avx2 (ptd->mgr);
      else
        init_mb_mgr_sse (ptd->mgr);

      if (ptd == imbm->per_thread_data)
        m = ptd->mgr;
    }
  /* *INDENT-ON* */

#define _(a, b, c, d, e, f) \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a); \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a; \
  ad->block_size = d; \
  ad->data_size = e * 2; \
  ad->hash_one_block = m->c##_one_block; \
  ad->hash_fn = m->c; \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c) \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_cipher_enc_##a); \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_cipher_dec_##a); \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
  ad->data_size = sizeof (ipsecmb_aes_key_data_t); \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b) \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a); \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a); \
  vnet_crypto_register_chained_ops_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
     ipsecmb_ops_gcm_cipher_enc_##a##_chained); \
  vnet_crypto_register_chained_ops_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
     ipsecmb_ops_gcm_cipher_dec_##a##_chained); \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
  ad->data_size = sizeof (struct gcm_key_data); \
  ad->aes_gcm_pre = m->gcm##b##_pre; \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
                                    ipsecmb_ops_chacha_poly_enc);
  vnet_crypto_register_ops_handler (vm, eidx,
                                    VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
                                    ipsecmb_ops_chacha_poly_dec);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
    ipsec_mb_ops_chacha_poly_enc_chained);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
    ipsec_mb_ops_chacha_poly_dec_chained);
  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
  ad->data_size = 0;
#endif

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
|