Line data Source code
1 : /*
2 : * Copyright (c) 2020 Intel and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #include <vlib/vlib.h>
17 : #include <vnet/plugin/plugin.h>
18 : #include <vpp/app/version.h>
19 :
20 : #include "crypto_sw_scheduler.h"
21 :
22 : int
23 0 : crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
24 : {
25 0 : crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
26 0 : vlib_thread_main_t *tm = vlib_get_thread_main ();
27 0 : crypto_sw_scheduler_per_thread_data_t *ptd = 0;
28 0 : u32 count = 0, i;
29 :
30 0 : if (worker_idx >= vlib_num_workers ())
31 : {
32 0 : return VNET_API_ERROR_INVALID_VALUE;
33 : }
34 :
35 0 : for (i = 0; i < tm->n_vlib_mains; i++)
36 : {
37 0 : ptd = cm->per_thread_data + i;
38 0 : count += ptd->self_crypto_enabled;
39 : }
40 :
41 0 : if (enabled || count > 1)
42 : {
43 0 : cm->per_thread_data[vlib_get_worker_thread_index
44 0 : (worker_idx)].self_crypto_enabled = enabled;
45 : }
46 : else /* cannot disable all crypto workers */
47 : {
48 0 : return VNET_API_ERROR_INVALID_VALUE_2;
49 : }
50 0 : return 0;
51 : }
52 :
53 : static void
54 327727 : crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
55 : vnet_crypto_key_index_t idx)
56 : {
57 327727 : crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
58 327727 : vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
59 :
60 327727 : vec_validate (cm->keys, idx);
61 :
62 327727 : if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
63 : {
64 4404 : if (kop == VNET_CRYPTO_KEY_OP_DEL)
65 : {
66 0 : cm->keys[idx].index_crypto = UINT32_MAX;
67 0 : cm->keys[idx].index_integ = UINT32_MAX;
68 : }
69 : else
70 : {
71 4404 : cm->keys[idx] = *key;
72 : }
73 : }
74 327727 : }
75 :
76 : static int
77 2792 : crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
78 : vnet_crypto_async_frame_t *frame, u8 is_enc)
79 : {
80 2792 : crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
81 2792 : crypto_sw_scheduler_per_thread_data_t *ptd =
82 2792 : vec_elt_at_index (cm->per_thread_data, vm->thread_index);
83 2792 : crypto_sw_scheduler_queue_t *current_queue =
84 2792 : is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
85 : &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
86 2792 : u64 head = current_queue->head;
87 :
88 2792 : if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
89 : {
90 0 : u32 n_elts = frame->n_elts, i;
91 0 : for (i = 0; i < n_elts; i++)
92 0 : frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
93 0 : return -1;
94 : }
95 :
96 2792 : current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
97 2792 : head += 1;
98 2792 : CLIB_MEMORY_STORE_BARRIER ();
99 2792 : current_queue->head = head;
100 2792 : return 0;
101 : }
102 :
103 : static int
104 1393 : crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
105 : vnet_crypto_async_frame_t *frame)
106 : {
107 1393 : return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
108 : }
109 : static int
110 1399 : crypto_sw_scheduler_frame_enqueue_encrypt (
111 : vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
112 : {
113 :
114 1399 : return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
115 : }
116 :
/**
 * Build a scatter-gather chunk list in ptd->chunks for a (possibly chained)
 * vlib buffer and record it on the op (op->chunk_index / op->n_chunks).
 *
 * @param offset  start offset relative to b->data of the first buffer
 * @param len     total number of bytes the op should cover
 */
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
			    crypto_sw_scheduler_per_thread_data_t *ptd,
			    vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
			    u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  /* Chunks for this op start at the current end of the shared vector. */
  op->chunk_index = vec_len (ptd->chunks);

  /* First chunk: the remainder of the first buffer, capped at len. */
  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;
  n_chunks = 1;

  /* One additional chunk per chained buffer until len is consumed. */
  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, let's allow
       * some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will stash
       * ESN at the end of the last buffer (if it can) because it must be part
       * of the integrity check but it will not update the buffer length.
       * Fixup the last operation chunk length if we have room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
	ch->len += len;
    }

  op->n_chunks = n_chunks;
}
167 :
/**
 * Convert one async frame element into a synchronous AEAD crypto op.
 *
 * Chained-buffer elements go to ptd->chained_crypto_ops with an SGL built in
 * ptd->chunks; single-buffer elements go to ptd->crypto_ops with direct
 * src/dst pointers.
 *
 * @param index  element index within the frame, stashed in op->user_data so
 *               failures can be mapped back to frame elements later
 */
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
				  crypto_sw_scheduler_per_thread_data_t * ptd,
				  vnet_crypto_async_frame_elt_t * fe,
				  u32 index, u32 bi,
				  vnet_crypto_op_id_t op_id, u16 aad_len,
				  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
				  fe->crypto_total_length);
    }
  else
    {
      /* In-place operation over a single contiguous buffer region. */
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}
202 :
/**
 * Convert one async frame element into a linked pair of synchronous ops:
 * one cipher op and one integrity (HMAC) op, placed in parallel vectors so
 * they can be processed in the required order by the caller.
 *
 * @param key    cached linked key providing index_crypto / index_integ
 * @param index  element index within the frame, stashed in user_data of
 *               both ops for failure mapping
 * @param is_enc unused here; direction is encoded in crypto_op_id
 */
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
					 crypto_sw_scheduler_per_thread_data_t
					 * ptd, vnet_crypto_key_t * key,
					 vnet_crypto_async_frame_elt_t * fe,
					 u32 index, u32 bi,
					 vnet_crypto_op_id_t crypto_op_id,
					 vnet_crypto_op_id_t integ_op_id,
					 u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      /* Both ops get their own SGL in the shared ptd->chunks vector. */
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
				  fe->crypto_start_offset,
				  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
				  fe->integ_start_offset,
				  fe->crypto_total_length +
				  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  /* HMAC check is performed by the integ op, not the cipher op. */
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags;
  crypto_op->user_data = integ_op->user_data = index;
}
250 :
251 : static_always_inline void
252 3912 : process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
253 : vnet_crypto_op_t * ops, u8 * state)
254 : {
255 3912 : u32 n_fail, n_ops = vec_len (ops);
256 3912 : vnet_crypto_op_t *op = ops;
257 :
258 3912 : if (n_ops == 0)
259 1656 : return;
260 :
261 2256 : n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
262 :
263 : /*
264 : * If we had a failure in the ops then we need to walk all the ops
265 : * and set the status in the corresponding frame. This status is
266 : * not set in the case with no failures, as in that case the overall
267 : * frame status is success.
268 : */
269 2256 : if (n_fail)
270 : {
271 345 : for (int i = 0; i < n_ops; i++)
272 : {
273 295 : ASSERT (op - ops < n_ops);
274 :
275 295 : f->elts[op->user_data].status = op->status;
276 295 : op++;
277 : }
278 50 : *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
279 : }
280 : }
281 :
282 : static_always_inline void
283 3912 : process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
284 : vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
285 : u8 * state)
286 : {
287 3912 : u32 n_fail, n_ops = vec_len (ops);
288 3912 : vnet_crypto_op_t *op = ops;
289 :
290 3912 : if (n_ops == 0)
291 2248 : return;
292 :
293 1664 : n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
294 :
295 : /*
296 : * If we had a failure in the ops then we need to walk all the ops
297 : * and set the status in the corresponding frame. This status is
298 : * not set in the case with no failures, as in that case the overall
299 : * frame status is success.
300 : */
301 1664 : if (n_fail)
302 : {
303 0 : for (int i = 0; i < n_ops; i++)
304 : {
305 0 : ASSERT (op - ops < n_ops);
306 :
307 0 : f->elts[op->user_data].status = op->status;
308 0 : op++;
309 : }
310 0 : *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
311 : }
312 : }
313 :
/**
 * Process an entire AEAD frame synchronously: convert every element to a
 * crypto op (chained or flat), run the ops, and set the resulting frame
 * state (success, or ELT_ERROR if any op failed).
 */
static_always_inline void
crypto_sw_scheduler_process_aead (vlib_main_t *vm,
				  crypto_sw_scheduler_per_thread_data_t *ptd,
				  vnet_crypto_async_frame_t *f, u32 aead_op,
				  u32 aad_len, u32 digest_len)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  /* Reuse the per-thread scratch vectors; reset lengths, keep capacity. */
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      /* fe - f->elts is the element index, used for failure mapping. */
      crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
					aead_op, aad_len, digest_len);
      bi++;
      fe++;
    }

  process_ops (vm, f, ptd->crypto_ops, &state);
  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
		       &state);
  f->state = state;
}
350 :
/**
 * Process an entire linked (cipher + HMAC) frame synchronously.
 *
 * Op ordering depends on direction: encrypt runs cipher ops before integ
 * ops (authenticate the ciphertext), decrypt runs integ ops before cipher
 * ops (verify before decrypting). The resulting frame state is success, or
 * ELT_ERROR if any op failed.
 */
static_always_inline void
crypto_sw_scheduler_process_link (
  vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
  crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
  u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  /* Reuse the per-thread scratch vectors; reset lengths, keep capacity. */
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);
  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      /* fe - f->elts is the element index, used for failure mapping. */
      crypto_sw_scheduler_convert_link_crypto (
	vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
	crypto_op, auth_op, digest_len, is_enc);
      bi++;
      fe++;
    }

  if (is_enc)
    {
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			   &state);
    }
  else
    {
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
    }

  f->state = state;
}
403 :
/**
 * Map an async crypto op id to its synchronous components.
 *
 * @return 1 for AEAD algorithms (auth_op_or_aad_len holds the AAD length),
 *         0 for linked cipher+HMAC algorithms (auth_op_or_aad_len holds the
 *         HMAC op id), -1 for an unknown op id.
 */
static_always_inline int
convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
			 u32 *crypto_op, u32 *auth_op_or_aad_len,
			 u16 *digest_len, u8 *is_enc)
{
  switch (async_op_id)
    {
      /* AEAD cases, generated per (alg, tag len, aad len) tuple. */
#define _(n, s, k, t, a) \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC: \
    *crypto_op = VNET_CRYPTO_OP_##n##_ENC; \
    *auth_op_or_aad_len = a; \
    *digest_len = t; \
    *is_enc = 1; \
    return 1; \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC: \
    *crypto_op = VNET_CRYPTO_OP_##n##_DEC; \
    *auth_op_or_aad_len = a; \
    *digest_len = t; \
    *is_enc = 0; \
    return 1;
      foreach_crypto_aead_async_alg
#undef _

      /* Linked cipher+HMAC cases, generated per (cipher, hash, tag len). */
#define _(c, h, s, k, d) \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC: \
    *crypto_op = VNET_CRYPTO_OP_##c##_ENC; \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC; \
    *digest_len = d; \
    *is_enc = 1; \
    return 0; \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC: \
    *crypto_op = VNET_CRYPTO_OP_##c##_DEC; \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC; \
    *digest_len = d; \
    *is_enc = 0; \
    return 0;
      foreach_crypto_link_async_alg
#undef _

      default : return -1;
    }

  return -1;
}
448 :
/**
 * Dequeue handler: scan all threads' queues for a pending frame, process it
 * in place, then return a completed frame from this thread's own queues.
 *
 * Frames are claimed with an atomic PENDING -> WORK_IN_PROGRESS CAS so
 * multiple worker threads can safely serve the same queues. Fairness is
 * provided by round-robining both the served thread (last_serve_lcore_id)
 * and the queue direction (last_serve_encrypt / last_return_queue).
 *
 * @param[out] nb_elts_processed  element count of the frame processed here
 * @param[out] enqueue_thread_idx thread that originally enqueued that frame
 * @return a completed frame from this thread's queues, or 0 if none ready.
 */
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			     u32 *enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    cm->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *f = 0;
  crypto_sw_scheduler_queue_t *current_queue = 0;
  u32 tail, head;
  u8 found = 0;

  /* get a pending frame to process */
  if (ptd->self_crypto_enabled)
    {
      /* Resume the scan after the thread we served last time. */
      u32 i = ptd->last_serve_lcore_id + 1;

      while (1)
	{
	  crypto_sw_scheduler_per_thread_data_t *st;
	  u32 j;

	  if (i >= vec_len (cm->per_thread_data))
	    i = 0;

	  st = cm->per_thread_data + i;

	  /* Alternate between decrypt and encrypt queues across calls. */
	  if (ptd->last_serve_encrypt)
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
	  else
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];

	  tail = current_queue->tail;
	  head = current_queue->head;

	  /* Skip this queue unless tail < head or head has overflowed
	   * and tail has not. At the point where tail overflows (== 0),
	   * the largest possible value of head is (queue size - 1).
	   * Prior to that, the largest possible value of head is
	   * (queue size - 2).
	   */
	  if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
	    goto skip_queue;

	  for (j = tail; j != head; j++)
	    {

	      f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];

	      if (!f)
		continue;

	      /* Atomically claim the frame; losing the race just means
	       * another thread is already working on it. */
	      if (clib_atomic_bool_cmp_and_swap (
		    &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
		    VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
		{
		  found = 1;
		  break;
		}
	    }

	skip_queue:
	  if (found || i == ptd->last_serve_lcore_id)
	    {
	      CLIB_MEMORY_STORE_BARRIER ();
	      ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
	      break;
	    }

	  i++;
	}

      ptd->last_serve_lcore_id = i;
    }

  if (found)
    {
      u32 crypto_op, auth_op_or_aad_len;
      u16 digest_len;
      u8 is_enc;
      int ret;

      ret = convert_async_crypto_id (
	f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);

      /* ret == 1: AEAD; ret == 0: linked cipher+HMAC; -1: unknown (skip). */
      if (ret == 1)
	crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len);
      else if (ret == 0)
	crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len,
					  is_enc);

      *enqueue_thread_idx = f->enqueue_thread_index;
      *nb_elts_processed = f->n_elts;
    }

  /* Completed frames are returned only from this thread's own queues,
   * alternating direction for fairness. */
  if (ptd->last_return_queue)
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
      ptd->last_return_queue = 0;
    }
  else
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
      ptd->last_return_queue = 1;
    }

  tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;

  /* Return the tail frame only once it has reached a terminal state
   * (SUCCESS or ELT_ERROR), preserving in-order completion. */
  if (current_queue->jobs[tail] &&
      current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {

      CLIB_MEMORY_STORE_BARRIER ();
      current_queue->tail++;
      f = current_queue->jobs[tail];
      current_queue->jobs[tail] = 0;

      return f;
    }

  return 0;
}
573 :
574 : static clib_error_t *
575 0 : sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
576 : vlib_cli_command_t * cmd)
577 : {
578 0 : unformat_input_t _line_input, *line_input = &_line_input;
579 : u32 worker_index;
580 : u8 crypto_enable;
581 : int rv;
582 :
583 : /* Get a line of input. */
584 0 : if (!unformat_user (input, unformat_line_input, line_input))
585 0 : return 0;
586 :
587 0 : while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
588 : {
589 0 : if (unformat (line_input, "worker %u", &worker_index))
590 : {
591 0 : if (unformat (line_input, "crypto"))
592 : {
593 0 : if (unformat (line_input, "on"))
594 0 : crypto_enable = 1;
595 0 : else if (unformat (line_input, "off"))
596 0 : crypto_enable = 0;
597 : else
598 0 : return (clib_error_return (0, "unknown input '%U'",
599 : format_unformat_error,
600 : line_input));
601 : }
602 : else
603 0 : return (clib_error_return (0, "unknown input '%U'",
604 : format_unformat_error, line_input));
605 : }
606 : else
607 0 : return (clib_error_return (0, "unknown input '%U'",
608 : format_unformat_error, line_input));
609 : }
610 :
611 0 : rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
612 0 : if (rv == VNET_API_ERROR_INVALID_VALUE)
613 : {
614 0 : return (clib_error_return (0, "invalid worker idx: %d", worker_index));
615 : }
616 0 : else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
617 : {
618 0 : return (clib_error_return (0, "cannot disable all crypto workers"));
619 : }
620 0 : return 0;
621 : }
622 :
623 : /*?
624 : * This command sets if worker will do crypto processing.
625 : *
626 : * @cliexpar
627 : * Example of how to set worker crypto processing off:
628 : * @cliexstart{set sw_scheduler worker 0 crypto off}
629 : * @cliexend
630 : ?*/
631 : /* *INDENT-OFF* */
/* CLI command registration: toggle software crypto processing on a worker.
 * is_mp_safe because the handler only flips a per-thread flag. */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
638 : /* *INDENT-ON* */
639 :
640 : static clib_error_t *
641 0 : sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
642 : vlib_cli_command_t * cmd)
643 : {
644 0 : crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
645 : u32 i;
646 :
647 0 : vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
648 0 : for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
649 : {
650 0 : vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
651 0 : (vlib_worker_threads + i)->name,
652 0 : cm->
653 0 : per_thread_data[i].self_crypto_enabled ? "on" : "off");
654 : }
655 :
656 0 : return 0;
657 : }
658 :
659 : /*?
660 : * This command displays sw_scheduler workers.
661 : *
662 : * @cliexpar
663 : * Example of how to show workers:
664 : * @cliexstart{show sw_scheduler workers}
665 : * @cliexend
666 : ?*/
667 : /* *INDENT-OFF* */
/* CLI command registration: display per-worker crypto state. Read-only,
 * hence is_mp_safe. */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
674 : /* *INDENT-ON* */
675 :
/* CLI init placeholder: the commands above are registered at load time by
 * the VLIB_CLI_COMMAND constructors, so there is nothing to do here. */
clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
683 :
/* Single global instance of the scheduler state. */
crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

/**
 * Plugin init: allocate per-thread queues, register the engine with the
 * crypto infra, and hook up key/enqueue/dequeue handlers for every
 * supported AEAD and linked cipher+HMAC algorithm.
 */
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  /* One per-thread-data slot per vlib main (main thread included). */
  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    /* All threads participate in crypto by default. */
    ptd->self_crypto_enabled = 1;

    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;

    vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
			  CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
			  CLIB_CACHE_LINE_BYTES);

    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;

    ptd->last_serve_encrypt = 0;
    ptd->last_return_queue = 0;

    vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
			  CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
			  CLIB_CACHE_LINE_BYTES);
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
				 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
				    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* Register the encrypt/decrypt enqueue handlers for every AEAD variant. */
  /* *INDENT-OFF* */
#define _(n, s, k, t, a) \
  vnet_crypto_register_enqueue_handler ( \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
    crypto_sw_scheduler_frame_enqueue_encrypt); \
  vnet_crypto_register_enqueue_handler ( \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_aead_async_alg
#undef _

  /* Same registration for every linked cipher+HMAC variant. */
#define _(c, h, s, k, d) \
  vnet_crypto_register_enqueue_handler ( \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
    crypto_sw_scheduler_frame_enqueue_encrypt); \
  vnet_crypto_register_enqueue_handler ( \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_link_async_alg
#undef _
  /* *INDENT-ON* */

  vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
					crypto_sw_scheduler_dequeue);

  /* NOTE(review): 'error' is never set above, so this cleanup is currently
   * unreachable; kept for safety if registration calls gain error returns. */
  if (error)
    vec_free (cm->per_thread_data);

  return error;
}
757 :
758 : /* *INDENT-OFF* */
/* Engine init must run after the core crypto infra is ready. */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
767 : /* *INDENT-ON* */
768 :
769 : /*
770 : * fd.io coding-style-patch-verification: ON
771 : *
772 : * Local Variables:
773 : * eval: (c-set-style "gnu")
774 : * End:
775 : */
|