/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
			  (worker_idx)].self_crypto_enabled = enabled;
    }
  else /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}
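
/*
 * Note on the guard above: count tallies how many threads currently have
 * self_crypto_enabled set, so a disable request is honored only while more
 * than one crypto-capable thread remains. This guarantees at least one
 * worker is always left to drain the queues.
 */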

static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
				 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
	{
	  cm->keys[idx].index_crypto = UINT32_MAX;
	  cm->keys[idx].index_integ = UINT32_MAX;
	}
      else
	{
	  cm->keys[idx] = *key;
	}
    }
}
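
/*
 * The handler above caches only link (crypto + integrity) keys: the link
 * path reads cm->keys[fe->key_index] at frame-conversion time to recover
 * index_crypto and index_integ, whereas plain keys are resolved by the
 * engine that ultimately processes the ops, so vec_validate () alone keeps
 * the vector indexable for them.
 */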

static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame, u8 is_enc)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *current_queue =
    is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
	     &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
  u64 head = current_queue->head;

  if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
	frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }

  current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  current_queue->head = head;
  return 0;
}
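
/*
 * Illustration only, not called by the plugin: head is a free-running
 * counter masked into a power-of-two ring, so the slot for any position is
 * (pos & CRYPTO_SW_SCHEDULER_QUEUE_MASK). A minimal sketch of the
 * producer-side fullness test used above, assuming the same
 * crypto_sw_scheduler_queue_t layout:
 */
static_always_inline int
crypto_sw_scheduler_queue_is_full_sketch (crypto_sw_scheduler_queue_t *q)
{
  /* full when the slot head would claim still holds an unconsumed frame */
  return q->jobs[q->head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] != 0;
}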

static int
crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
					   vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
}

static int
crypto_sw_scheduler_frame_enqueue_encrypt (
  vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
}

static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
			    crypto_sw_scheduler_per_thread_data_t *ptd,
			    vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
			    u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;
  n_chunks = 1;

  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, let's allow
       * some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will stash
       * ESN at the end of the last buffer (if it can) because it must be part
       * of the integrity check but it will not update the buffer length.
       * Fixup the last operation chunk length if we have room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
	ch->len += len;
    }

  op->n_chunks = n_chunks;
}
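
/*
 * Illustration only, not called by the plugin: a chained op produced by
 * cryptodev_sw_scheduler_sgl () covers ptd->chunks[op->chunk_index ..
 * op->chunk_index + op->n_chunks - 1]. A minimal consumer-side sketch that
 * recomputes the byte count those chunks describe:
 */
static_always_inline u32
crypto_sw_scheduler_chained_len_sketch (
  crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_op_t *op)
{
  vnet_crypto_op_chunk_t *ch = ptd->chunks + op->chunk_index;
  u32 i, total = 0;

  /* walk the contiguous chunk slice owned by this op */
  for (i = 0; i < op->n_chunks; i++, ch++)
    total += ch->len;

  return total;
}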

static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
				  crypto_sw_scheduler_per_thread_data_t * ptd,
				  vnet_crypto_async_frame_elt_t * fe,
				  u32 index, u32 bi,
				  vnet_crypto_op_id_t op_id, u16 aad_len,
				  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
				  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
					 crypto_sw_scheduler_per_thread_data_t
					 * ptd, vnet_crypto_key_t * key,
					 vnet_crypto_async_frame_elt_t * fe,
					 u32 index, u32 bi,
					 vnet_crypto_op_id_t crypto_op_id,
					 vnet_crypto_op_id_t integ_op_id,
					 u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
				  fe->crypto_start_offset,
				  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
				  fe->integ_start_offset,
				  fe->crypto_total_length +
				  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags;
  crypto_op->user_data = integ_op->user_data = index;
}
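
/*
 * In the conversion above, the HMAC-check flag stays on the integrity op
 * only: crypto_op->flags masks out VNET_CRYPTO_OP_FLAG_HMAC_CHECK because
 * digest verification is the integ op's job. Both ops share the same
 * user_data (the element index), so a failing op of either kind maps back
 * to the right f->elts[] slot in process_ops () below.
 */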

static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
	     vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  /*
   * If we had a failure in the ops then we need to walk all the ops
   * and set the status in the corresponding frame. This status is
   * not set in the case with no failures, as in that case the overall
   * frame status is success.
   */
  if (n_fail)
    {
      for (int i = 0; i < n_ops; i++)
	{
	  ASSERT (op - ops < n_ops);

	  f->elts[op->user_data].status = op->status;
	  op++;
	}
      *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
    }
}

static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
		     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
		     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  /*
   * If we had a failure in the ops then we need to walk all the ops
   * and set the status in the corresponding frame. This status is
   * not set in the case with no failures, as in that case the overall
   * frame status is success.
   */
  if (n_fail)
    {
      for (int i = 0; i < n_ops; i++)
	{
	  ASSERT (op - ops < n_ops);

	  f->elts[op->user_data].status = op->status;
	  op++;
	}
      *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
    }
}

static_always_inline void
crypto_sw_scheduler_process_aead (vlib_main_t *vm,
				  crypto_sw_scheduler_per_thread_data_t *ptd,
				  vnet_crypto_async_frame_t *f, u32 aead_op,
				  u32 aad_len, u32 digest_len)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
					aead_op, aad_len, digest_len);
      bi++;
      fe++;
    }

  process_ops (vm, f, ptd->crypto_ops, &state);
  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks, &state);
  f->state = state;
}

static_always_inline void
crypto_sw_scheduler_process_link (vlib_main_t *vm,
				  crypto_sw_scheduler_main_t *cm,
				  crypto_sw_scheduler_per_thread_data_t *ptd,
				  vnet_crypto_async_frame_t *f, u32 crypto_op,
				  u32 auth_op, u16 digest_len, u8 is_enc)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);
  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_link_crypto (
	vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0], crypto_op,
	auth_op, digest_len, is_enc);
      bi++;
      fe++;
    }

  if (is_enc)
    {
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks, &state);
    }
  else
    {
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks, &state);
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
    }

  f->state = state;
}

static_always_inline int
convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id, u32 *crypto_op,
			 u32 *auth_op_or_aad_len, u16 *digest_len, u8 *is_enc)
{
  switch (async_op_id)
    {
#define _(n, s, k, t, a)                                                      \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 1;                                                              \
    return 1;                                                                 \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 0;                                                              \
    return 1;
      foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 1;                                                              \
    return 0;                                                                 \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 0;                                                              \
    return 0;
      foreach_crypto_link_async_alg
#undef _

    default:
      return -1;
    }

  return -1;
}
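
/*
 * For reference, a single AEAD row of the macro above expands along these
 * lines (a sketch, assuming the aes-128-gcm / 16-byte-tag / 12-byte-aad row
 * of vnet's foreach_crypto_aead_async_alg):
 *
 *   case VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC:
 *     *crypto_op = VNET_CRYPTO_OP_AES_128_GCM_ENC;
 *     *auth_op_or_aad_len = 12;
 *     *digest_len = 16;
 *     *is_enc = 1;
 *     return 1;
 *
 * The return value encodes the op family: 1 for AEAD (aad length in
 * auth_op_or_aad_len), 0 for linked crypto + HMAC (the HMAC op id in
 * auth_op_or_aad_len), -1 for an unknown id.
 */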

static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			     u32 *enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    cm->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *f = 0;
  crypto_sw_scheduler_queue_t *current_queue = 0;
  u32 tail, head;
  u8 found = 0;

  /* get a pending frame to process */
  if (ptd->self_crypto_enabled)
    {
      u32 i = ptd->last_serve_lcore_id + 1;

      while (1)
	{
	  crypto_sw_scheduler_per_thread_data_t *st;
	  u32 j;

	  if (i >= vec_len (cm->per_thread_data))
	    i = 0;

	  st = cm->per_thread_data + i;

	  if (ptd->last_serve_encrypt)
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
	  else
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];

	  tail = current_queue->tail;
	  head = current_queue->head;

	  /* Skip this queue unless tail < head or head has overflowed
	   * and tail has not. At the point where tail overflows (== 0),
	   * the largest possible value of head is (queue size - 1).
	   * Prior to that, the largest possible value of head is
	   * (queue size - 2).
	   */
	  if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
	    goto skip_queue;

	  for (j = tail; j != head; j++)
	    {
	      f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];

	      if (!f)
		continue;

	      if (clib_atomic_bool_cmp_and_swap (
		    &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
		    VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
		{
		  found = 1;
		  break;
		}
	    }

	skip_queue:
	  if (found || i == ptd->last_serve_lcore_id)
	    {
	      CLIB_MEMORY_STORE_BARRIER ();
	      ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
	      break;
	    }

	  i++;
	}

      ptd->last_serve_lcore_id = i;
    }

  if (found)
    {
      u32 crypto_op, auth_op_or_aad_len;
      u16 digest_len;
      u8 is_enc;
      int ret;

      ret = convert_async_crypto_id (f->op, &crypto_op, &auth_op_or_aad_len,
				     &digest_len, &is_enc);

      if (ret == 1)
	crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len);
      else if (ret == 0)
	crypto_sw_scheduler_process_link (
	  vm, cm, ptd, f, crypto_op, auth_op_or_aad_len, digest_len, is_enc);

      *enqueue_thread_idx = f->enqueue_thread_index;
      *nb_elts_processed = f->n_elts;
    }

  if (ptd->last_return_queue)
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
      ptd->last_return_queue = 0;
    }
  else
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
      ptd->last_return_queue = 1;
    }

  tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;

  if (current_queue->jobs[tail] &&
      current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      current_queue->tail++;
      f = current_queue->jobs[tail];
      current_queue->jobs[tail] = 0;

      return f;
    }

  return 0;
}
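
/*
 * Summary of the dequeue logic above: when self-crypto is enabled, each
 * call round-robins over every thread's queues starting after
 * last_serve_lcore_id, alternating encrypt/decrypt via last_serve_encrypt,
 * and claims at most one PENDING frame with a compare-and-swap so several
 * workers may poll the same queues safely. Completed frames, by contrast,
 * are only ever returned from the calling thread's own queues (alternated
 * via last_return_queue), in order, by advancing the local tail.
 */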

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
				vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index = ~0;
  u8 crypto_enable = 0;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
	{
	  if (unformat (line_input, "crypto"))
	    {
	      if (unformat (line_input, "on"))
		crypto_enable = 1;
	      else if (unformat (line_input, "off"))
		crypto_enable = 0;
	      else
		return (clib_error_return (0, "unknown input '%U'",
					   format_unformat_error,
					   line_input));
	    }
	  else
	    return (clib_error_return (0, "unknown input '%U'",
				       format_unformat_error, line_input));
	}
      else
	return (clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a given worker does crypto processing.
 *
 * @cliexpar
 * Example of how to disable crypto processing on worker 0:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
			   vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
		       (vlib_worker_threads + i)->name,
		       cm->per_thread_data[i].self_crypto_enabled ? "on" :
								    "off");
    }

  return 0;
}

/*?
 * This command displays the sw_scheduler workers and whether crypto
 * processing is enabled on each.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;
  u32 i;

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      ptd->self_crypto_enabled = i > 0 || vlib_num_workers () < 1;

      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;

      vec_validate_aligned (
	ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
	CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1, CLIB_CACHE_LINE_BYTES);

      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;

      ptd->last_serve_encrypt = 0;
      ptd->last_return_queue = 0;

      vec_validate_aligned (
	ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
	CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1, CLIB_CACHE_LINE_BYTES);
    }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
				 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
				    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_link_async_alg
#undef _
  /* *INDENT-ON* */

  vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
					crypto_sw_scheduler_dequeue);

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */