Line data Source code
1 : /*
2 : * Copyright (c) 2021 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #include <quic/quic.h>
17 : #include <quic/quic_crypto.h>
18 : #include <vnet/session/session.h>
19 :
20 : #include <quicly.h>
21 : #include <picotls/openssl.h>
22 : #include <pthread.h>
23 :
24 : #define QUICLY_EPOCH_1RTT 3
25 :
26 : extern quic_main_t quic_main;
27 : extern quic_ctx_t *quic_get_conn_ctx (quicly_conn_t *conn);
28 : vnet_crypto_main_t *cm = &crypto_main;
29 :
/* Raw key material plus its vnet algorithm; used to (re)program the
 * per-thread vnet crypto key slot before each operation
 * (see quic_crypto_set_key). */
typedef struct crypto_key_
{
  vnet_crypto_alg_t algo; /* vnet crypto algorithm this key is for */
  u8 key[32];		  /* key bytes; 32 covers up to AES-256 */
  u16 key_len;		  /* number of valid bytes in key[] */
} crypto_key_t;

/* picotls cipher context extended with vnet crypto state, used for
 * header protection. Must begin with ptls_cipher_context_t so the
 * ptls pointer can be cast to this struct. */
struct cipher_context_t
{
  ptls_cipher_context_t super;
  vnet_crypto_op_t op;	  /* reusable vnet crypto op descriptor */
  vnet_crypto_op_id_t id; /* vnet op id (algorithm + direction) */
  crypto_key_t key;	  /* raw key, loaded into the thread slot per op */
};

/* picotls AEAD context extended with vnet crypto state, used for
 * packet payload protection. Must begin with ptls_aead_context_t. */
struct aead_crypto_context_t
{
  ptls_aead_context_t super;
  EVP_CIPHER_CTX *evp_ctx;
  uint8_t static_iv[PTLS_MAX_IV_SIZE]; /* per-direction static IV */
  vnet_crypto_op_t op;
  crypto_key_t key;

  vnet_crypto_op_id_t id;
  uint8_t iv[PTLS_MAX_IV_SIZE]; /* per-packet IV built from static_iv + pn */
};
56 :
/**
 * quicly crypto-engine callback: create the cipher contexts for a new
 * encryption epoch.
 *
 * Derives the header-protection key from @a secret, then allocates the
 * header-protection cipher and the AEAD packet-protection context. For
 * the 1-RTT ingress (decrypt) direction the new contexts are also cached
 * on the quic_ctx_t so vpp can decrypt packets itself
 * (quic_crypto_decrypt_packet); replacing an existing ingress AEAD
 * context counts as a key update and bumps key_phase_ingress.
 *
 * @return 0 on success, a PTLS_ERROR_* code on failure; on failure any
 *         partially-created contexts are freed and the out-pointers
 *         reset to NULL.
 */
static int
quic_crypto_setup_cipher (quicly_crypto_engine_t *engine, quicly_conn_t *conn,
			  size_t epoch, int is_enc,
			  ptls_cipher_context_t **header_protect_ctx,
			  ptls_aead_context_t **packet_protect_ctx,
			  ptls_aead_algorithm_t *aead,
			  ptls_hash_algorithm_t *hash, const void *secret)
{
  uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
  int ret;

  *packet_protect_ctx = NULL;
  /* generate new header protection key */
  if (header_protect_ctx != NULL)
    {
      *header_protect_ctx = NULL;
      /* derive hp key from the traffic secret with the "quic hp" label
       * (RFC 9001 section 5.4.1) */
      ret =
	ptls_hkdf_expand_label (hash, hpkey, aead->ctr_cipher->key_size,
				ptls_iovec_init (secret, hash->digest_size),
				"quic hp", ptls_iovec_init (NULL, 0), NULL);
      if (ret)
	goto Exit;
      *header_protect_ctx = ptls_cipher_new (aead->ctr_cipher, is_enc, hpkey);
      if (NULL == *header_protect_ctx)
	{
	  ret = PTLS_ERROR_NO_MEMORY;
	  goto Exit;
	}
    }

  /* generate new AEAD context */
  *packet_protect_ctx =
    ptls_aead_new (aead, hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL);
  if (NULL == *packet_protect_ctx)
    {
      ret = PTLS_ERROR_NO_MEMORY;
      goto Exit;
    }

  if (epoch == QUICLY_EPOCH_1RTT && !is_enc)
    {
      /* cache the ingress 1-RTT contexts so vpp can decrypt short-header
       * packets without going through quicly */
      quic_ctx_t *qctx = quic_get_conn_ctx (conn);
      if (qctx->ingress_keys.aead_ctx != NULL)
	qctx->key_phase_ingress++; /* key update: flip expected phase bit */

      qctx->ingress_keys.aead_ctx = *packet_protect_ctx;
      if (header_protect_ctx != NULL)
	qctx->ingress_keys.hp_ctx = *header_protect_ctx;
    }

  ret = 0;

Exit:
  if (ret)
    {
      /* unwind anything allocated before the failure */
      if (*packet_protect_ctx != NULL)
	{
	  ptls_aead_free (*packet_protect_ctx);
	  *packet_protect_ctx = NULL;
	}
      if (header_protect_ctx && *header_protect_ctx != NULL)
	{
	  ptls_cipher_free (*header_protect_ctx);
	  *header_protect_ctx = NULL;
	}
    }
  /* scrub the derived hp key from the stack */
  ptls_clear_memory (hpkey, sizeof (hpkey));
  return ret;
}
126 :
127 : static u32
128 148831 : quic_crypto_set_key (crypto_key_t *key)
129 : {
130 148831 : u8 thread_index = vlib_get_thread_index ();
131 148831 : u32 key_id = quic_main.per_thread_crypto_key_indices[thread_index];
132 148831 : vnet_crypto_key_t *vnet_key = vnet_crypto_get_key (key_id);
133 148831 : vlib_main_t *vm = vlib_get_main ();
134 : vnet_crypto_engine_t *engine;
135 :
136 744155 : vec_foreach (engine, cm->engines)
137 595324 : if (engine->key_op_handler)
138 446493 : engine->key_op_handler (vm, VNET_CRYPTO_KEY_OP_DEL, key_id);
139 :
140 148831 : vnet_key->alg = key->algo;
141 148831 : clib_memcpy (vnet_key->data, key->key, key->key_len);
142 :
143 744155 : vec_foreach (engine, cm->engines)
144 595324 : if (engine->key_op_handler)
145 446493 : engine->key_op_handler (vm, VNET_CRYPTO_KEY_OP_ADD, key_id);
146 :
147 148831 : return key_id;
148 : }
149 :
150 : static size_t
151 49481 : quic_crypto_aead_decrypt (quic_ctx_t *qctx, ptls_aead_context_t *_ctx,
152 : void *_output, const void *input, size_t inlen,
153 : uint64_t decrypted_pn, const void *aad,
154 : size_t aadlen)
155 : {
156 49481 : vlib_main_t *vm = vlib_get_main ();
157 :
158 49481 : struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
159 :
160 49481 : vnet_crypto_op_init (&ctx->op, ctx->id);
161 49481 : ctx->op.aad = (u8 *) aad;
162 49481 : ctx->op.aad_len = aadlen;
163 49481 : ctx->op.iv = ctx->iv;
164 49481 : ptls_aead__build_iv (ctx->super.algo, ctx->op.iv, ctx->static_iv,
165 : decrypted_pn);
166 49481 : ctx->op.src = (u8 *) input;
167 49481 : ctx->op.dst = _output;
168 49481 : ctx->op.key_index = quic_crypto_set_key (&ctx->key);
169 49481 : ctx->op.len = inlen - ctx->super.algo->tag_size;
170 49481 : ctx->op.tag_len = ctx->super.algo->tag_size;
171 49481 : ctx->op.tag = ctx->op.src + ctx->op.len;
172 :
173 49481 : vnet_crypto_process_ops (vm, &(ctx->op), 1);
174 :
175 49481 : return ctx->op.len;
176 : }
177 :
178 : void
179 49585 : quic_crypto_decrypt_packet (quic_ctx_t *qctx, quic_rx_packet_ctx_t *pctx)
180 : {
181 49585 : ptls_cipher_context_t *header_protection = NULL;
182 49585 : ptls_aead_context_t *aead = NULL;
183 : int pn;
184 :
185 : /* Long Header packets are not decrypted by vpp */
186 49585 : if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
187 104 : return;
188 :
189 : uint64_t next_expected_packet_number =
190 49488 : quicly_get_next_expected_packet_number (qctx->conn);
191 49488 : if (next_expected_packet_number == UINT64_MAX)
192 7 : return;
193 :
194 49481 : aead = qctx->ingress_keys.aead_ctx;
195 49481 : header_protection = qctx->ingress_keys.hp_ctx;
196 :
197 49481 : if (!aead || !header_protection)
198 0 : return;
199 :
200 49481 : size_t encrypted_len = pctx->packet.octets.len - pctx->packet.encrypted_off;
201 49481 : uint8_t hpmask[5] = { 0 };
202 49481 : uint32_t pnbits = 0;
203 : size_t pnlen, ptlen, i;
204 :
205 : /* decipher the header protection, as well as obtaining pnbits, pnlen */
206 49481 : if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE)
207 0 : return;
208 49481 : ptls_cipher_init (header_protection, pctx->packet.octets.base +
209 49481 : pctx->packet.encrypted_off +
210 : QUICLY_MAX_PN_SIZE);
211 49481 : ptls_cipher_encrypt (header_protection, hpmask, hpmask, sizeof (hpmask));
212 98962 : pctx->packet.octets.base[0] ^=
213 49481 : hpmask[0] &
214 49481 : (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf : 0x1f);
215 49481 : pnlen = (pctx->packet.octets.base[0] & 0x3) + 1;
216 148443 : for (i = 0; i != pnlen; ++i)
217 : {
218 98962 : pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
219 98962 : hpmask[i + 1];
220 98962 : pnbits = (pnbits << 8) |
221 98962 : pctx->packet.octets.base[pctx->packet.encrypted_off + i];
222 : }
223 :
224 49481 : size_t aead_off = pctx->packet.encrypted_off + pnlen;
225 :
226 49481 : pn = quicly_determine_packet_number (pnbits, pnlen * 8,
227 : next_expected_packet_number);
228 :
229 49481 : int key_phase_bit =
230 49481 : (pctx->packet.octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
231 :
232 49481 : if (key_phase_bit != (qctx->key_phase_ingress & 1))
233 : {
234 0 : pctx->packet.octets.base[0] ^=
235 0 : hpmask[0] &
236 0 : (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf :
237 : 0x1f);
238 0 : for (i = 0; i != pnlen; ++i)
239 : {
240 0 : pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
241 0 : hpmask[i + 1];
242 : }
243 0 : return;
244 : }
245 :
246 49481 : if ((ptlen = quic_crypto_aead_decrypt (
247 49481 : qctx, aead, pctx->packet.octets.base + aead_off,
248 49481 : pctx->packet.octets.base + aead_off,
249 49481 : pctx->packet.octets.len - aead_off, pn, pctx->packet.octets.base,
250 : aead_off)) == SIZE_MAX)
251 : {
252 0 : fprintf (stderr, "%s: aead decryption failure (pn: %d)\n", __FUNCTION__,
253 : pn);
254 0 : return;
255 : }
256 :
257 49481 : pctx->packet.encrypted_off = aead_off;
258 49481 : pctx->packet.octets.len = ptlen + aead_off;
259 :
260 49481 : pctx->packet.decrypted.pn = pn;
261 49481 : pctx->packet.decrypted.key_phase = qctx->key_phase_ingress;
262 : }
263 :
264 : void
265 49675 : quic_crypto_encrypt_packet (struct st_quicly_crypto_engine_t *engine,
266 : quicly_conn_t *conn,
267 : ptls_cipher_context_t *header_protect_ctx,
268 : ptls_aead_context_t *packet_protect_ctx,
269 : ptls_iovec_t datagram, size_t first_byte_at,
270 : size_t payload_from, uint64_t packet_number,
271 : int coalesced)
272 : {
273 49675 : vlib_main_t *vm = vlib_get_main ();
274 :
275 49675 : struct cipher_context_t *hp_ctx =
276 : (struct cipher_context_t *) header_protect_ctx;
277 49675 : struct aead_crypto_context_t *aead_ctx =
278 : (struct aead_crypto_context_t *) packet_protect_ctx;
279 :
280 49675 : void *input = datagram.base + payload_from;
281 49675 : void *output = input;
282 49675 : size_t inlen =
283 49675 : datagram.len - payload_from - packet_protect_ctx->algo->tag_size;
284 49675 : const void *aad = datagram.base + first_byte_at;
285 49675 : size_t aadlen = payload_from - first_byte_at;
286 :
287 : /* Build AEAD encrypt crypto operation */
288 49675 : vnet_crypto_op_init (&aead_ctx->op, aead_ctx->id);
289 49675 : aead_ctx->op.aad = (u8 *) aad;
290 49675 : aead_ctx->op.aad_len = aadlen;
291 49675 : aead_ctx->op.iv = aead_ctx->iv;
292 49675 : ptls_aead__build_iv (aead_ctx->super.algo, aead_ctx->op.iv,
293 49675 : aead_ctx->static_iv, packet_number);
294 49675 : aead_ctx->op.key_index = quic_crypto_set_key (&aead_ctx->key);
295 49675 : aead_ctx->op.src = (u8 *) input;
296 49675 : aead_ctx->op.dst = output;
297 49675 : aead_ctx->op.len = inlen;
298 49675 : aead_ctx->op.tag_len = aead_ctx->super.algo->tag_size;
299 49675 : aead_ctx->op.tag = aead_ctx->op.src + inlen;
300 49675 : vnet_crypto_process_ops (vm, &(aead_ctx->op), 1);
301 49675 : assert (aead_ctx->op.status == VNET_CRYPTO_OP_STATUS_COMPLETED);
302 :
303 : /* Build Header protection crypto operation */
304 49675 : ptls_aead_supplementary_encryption_t supp = {
305 : .ctx = header_protect_ctx,
306 : .input =
307 49675 : datagram.base + payload_from - QUICLY_SEND_PN_SIZE + QUICLY_MAX_PN_SIZE
308 : };
309 :
310 : /* Build Header protection crypto operation */
311 49675 : vnet_crypto_op_init (&hp_ctx->op, hp_ctx->id);
312 49675 : memset (supp.output, 0, sizeof (supp.output));
313 49675 : hp_ctx->op.iv = (u8 *) supp.input;
314 49675 : hp_ctx->op.key_index = quic_crypto_set_key (&hp_ctx->key);
315 : ;
316 49675 : hp_ctx->op.src = (u8 *) supp.output;
317 49675 : hp_ctx->op.dst = (u8 *) supp.output;
318 49675 : hp_ctx->op.len = sizeof (supp.output);
319 49675 : vnet_crypto_process_ops (vm, &(hp_ctx->op), 1);
320 49675 : assert (hp_ctx->op.status == VNET_CRYPTO_OP_STATUS_COMPLETED);
321 :
322 99350 : datagram.base[first_byte_at] ^=
323 49675 : supp.output[0] &
324 49675 : (QUICLY_PACKET_IS_LONG_HEADER (datagram.base[first_byte_at]) ? 0xf : 0x1f);
325 149025 : for (size_t i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
326 99350 : datagram.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^=
327 99350 : supp.output[i + 1];
328 49675 : }
329 :
330 : static int
331 216 : quic_crypto_cipher_setup_crypto (ptls_cipher_context_t *_ctx, int is_enc,
332 : const void *key, const EVP_CIPHER *cipher)
333 : {
334 216 : struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
335 :
336 : vnet_crypto_alg_t algo;
337 216 : if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
338 : {
339 72 : algo = VNET_CRYPTO_ALG_AES_128_CTR;
340 72 : ctx->id = is_enc ? VNET_CRYPTO_OP_AES_128_CTR_ENC :
341 : VNET_CRYPTO_OP_AES_128_CTR_DEC;
342 72 : ptls_openssl_aes128ctr.setup_crypto (_ctx, is_enc, key);
343 : }
344 144 : else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
345 : {
346 144 : algo = VNET_CRYPTO_ALG_AES_256_CTR;
347 144 : ctx->id = is_enc ? VNET_CRYPTO_OP_AES_256_CTR_ENC :
348 : VNET_CRYPTO_OP_AES_256_CTR_DEC;
349 144 : ptls_openssl_aes256ctr.setup_crypto (_ctx, is_enc, key);
350 : }
351 : else
352 : {
353 : QUIC_DBG (1, "%s, Invalid crypto cipher : ", __FUNCTION__,
354 : _ctx->algo->name);
355 0 : assert (0);
356 : }
357 :
358 216 : if (quic_main.vnet_crypto_enabled)
359 : {
360 : // ctx->key_index =
361 : // quic_crypto_go_setup_key (algo, key, _ctx->algo->key_size);
362 216 : ctx->key.algo = algo;
363 216 : ctx->key.key_len = _ctx->algo->key_size;
364 216 : assert (ctx->key.key_len <= 32);
365 216 : clib_memcpy (&ctx->key.key, key, ctx->key.key_len);
366 : }
367 :
368 216 : return 0;
369 : }
370 :
/* AES-128-CTR header-protection setup hook.
 * NOTE(review): is_enc is deliberately ignored and 1 is passed instead —
 * QUIC header protection always runs the cipher in the encrypt direction
 * for both sender and receiver; confirm this matches the ptls contract. */
static int
quic_crypto_aes128ctr_setup_crypto (ptls_cipher_context_t *ctx, int is_enc,
				    const void *key)
{
  return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_128_ctr ());
}
377 :
/* AES-256-CTR header-protection setup hook.
 * NOTE(review): is_enc is deliberately ignored and 1 is passed instead —
 * QUIC header protection always runs the cipher in the encrypt direction
 * for both sender and receiver; confirm this matches the ptls contract. */
static int
quic_crypto_aes256ctr_setup_crypto (ptls_cipher_context_t *ctx, int is_enc,
				    const void *key)
{
  return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_256_ctr ());
}
384 :
385 : static int
386 216 : quic_crypto_aead_setup_crypto (ptls_aead_context_t *_ctx, int is_enc,
387 : const void *key, const void *iv,
388 : const EVP_CIPHER *cipher)
389 : {
390 216 : struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
391 :
392 : vnet_crypto_alg_t algo;
393 216 : if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
394 : {
395 72 : algo = VNET_CRYPTO_ALG_AES_128_GCM;
396 72 : ctx->id = is_enc ? VNET_CRYPTO_OP_AES_128_GCM_ENC :
397 : VNET_CRYPTO_OP_AES_128_GCM_DEC;
398 72 : ptls_openssl_aes128gcm.setup_crypto (_ctx, is_enc, key, iv);
399 : }
400 144 : else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
401 : {
402 144 : algo = VNET_CRYPTO_ALG_AES_256_GCM;
403 144 : ctx->id = is_enc ? VNET_CRYPTO_OP_AES_256_GCM_ENC :
404 : VNET_CRYPTO_OP_AES_256_GCM_DEC;
405 144 : ptls_openssl_aes256gcm.setup_crypto (_ctx, is_enc, key, iv);
406 : }
407 : else
408 : {
409 : QUIC_DBG (1, "%s, invalied aead cipher %s", __FUNCTION__,
410 : _ctx->algo->name);
411 0 : assert (0);
412 : }
413 :
414 216 : if (quic_main.vnet_crypto_enabled)
415 : {
416 216 : clib_memcpy (ctx->static_iv, iv, ctx->super.algo->iv_size);
417 : // ctx->key_index =
418 : // quic_crypto_go_setup_key (algo, key, _ctx->algo->key_size);
419 216 : ctx->key.algo = algo;
420 216 : ctx->key.key_len = _ctx->algo->key_size;
421 216 : assert (ctx->key.key_len <= 32);
422 216 : clib_memcpy (&ctx->key.key, key, ctx->key.key_len);
423 : }
424 :
425 216 : return 0;
426 : }
427 :
/* AES-128-GCM packet-protection setup hook (see ptls_aead_algorithm_t
 * quic_crypto_aes128gcm below). */
static int
quic_crypto_aead_aes128gcm_setup_crypto (ptls_aead_context_t *ctx, int is_enc,
					 const void *key, const void *iv)
{
  return quic_crypto_aead_setup_crypto (ctx, is_enc, key, iv,
					EVP_aes_128_gcm ());
}
435 :
/* AES-256-GCM packet-protection setup hook (see ptls_aead_algorithm_t
 * quic_crypto_aes256gcm below). */
static int
quic_crypto_aead_aes256gcm_setup_crypto (ptls_aead_context_t *ctx, int is_enc,
					 const void *key, const void *iv)
{
  return quic_crypto_aead_setup_crypto (ctx, is_enc, key, iv,
					EVP_aes_256_gcm ());
}
443 :
444 : int
445 0 : quic_encrypt_ticket_cb (ptls_encrypt_ticket_t *_self, ptls_t *tls,
446 : int is_encrypt, ptls_buffer_t *dst, ptls_iovec_t src)
447 : {
448 0 : quic_session_cache_t *self = (void *) _self;
449 : int ret;
450 :
451 0 : if (is_encrypt)
452 : {
453 :
454 : /* replace the cached entry along with a newly generated session id */
455 0 : clib_mem_free (self->data.base);
456 0 : if ((self->data.base = clib_mem_alloc (src.len)) == NULL)
457 0 : return PTLS_ERROR_NO_MEMORY;
458 :
459 0 : ptls_get_context (tls)->random_bytes (self->id, sizeof (self->id));
460 0 : clib_memcpy (self->data.base, src.base, src.len);
461 0 : self->data.len = src.len;
462 :
463 : /* store the session id in buffer */
464 0 : if ((ret = ptls_buffer_reserve (dst, sizeof (self->id))) != 0)
465 0 : return ret;
466 0 : clib_memcpy (dst->base + dst->off, self->id, sizeof (self->id));
467 0 : dst->off += sizeof (self->id);
468 : }
469 : else
470 : {
471 : /* check if session id is the one stored in cache */
472 0 : if (src.len != sizeof (self->id))
473 0 : return PTLS_ERROR_SESSION_NOT_FOUND;
474 0 : if (clib_memcmp (self->id, src.base, sizeof (self->id)) != 0)
475 0 : return PTLS_ERROR_SESSION_NOT_FOUND;
476 :
477 : /* return the cached value */
478 0 : if ((ret = ptls_buffer_reserve (dst, self->data.len)) != 0)
479 0 : return ret;
480 0 : clib_memcpy (dst->base + dst->off, self->data.base, self->data.len);
481 0 : dst->off += self->data.len;
482 : }
483 :
484 0 : return 0;
485 : }
486 :
/* Algorithm descriptors wiring quicly/picotls to the quic_crypto_*
 * setup functions above. The context sizes reference the extended
 * structs so vnet crypto state is allocated alongside the ptls
 * context. */
ptls_cipher_algorithm_t quic_crypto_aes128ctr = {
  "AES128-CTR",
  PTLS_AES128_KEY_SIZE,
  1 /* block size */,
  PTLS_AES_IV_SIZE,
  sizeof (struct cipher_context_t),
  quic_crypto_aes128ctr_setup_crypto
};

ptls_cipher_algorithm_t quic_crypto_aes256ctr = {
  "AES256-CTR",
  PTLS_AES256_KEY_SIZE,
  1 /* block size */,
  PTLS_AES_IV_SIZE,
  sizeof (struct cipher_context_t),
  quic_crypto_aes256ctr_setup_crypto
};

#define PTLS_X86_CACHE_LINE_ALIGN_BITS 6
ptls_aead_algorithm_t quic_crypto_aes128gcm = {
  "AES128-GCM",
  PTLS_AESGCM_CONFIDENTIALITY_LIMIT,
  PTLS_AESGCM_INTEGRITY_LIMIT,
  &quic_crypto_aes128ctr,
  &ptls_openssl_aes128ecb,
  PTLS_AES128_KEY_SIZE,
  PTLS_AESGCM_IV_SIZE,
  PTLS_AESGCM_TAG_SIZE,
  { PTLS_TLS12_AESGCM_FIXED_IV_SIZE, PTLS_TLS12_AESGCM_RECORD_IV_SIZE },
  1,
  PTLS_X86_CACHE_LINE_ALIGN_BITS,
  sizeof (struct aead_crypto_context_t),
  quic_crypto_aead_aes128gcm_setup_crypto
};

ptls_aead_algorithm_t quic_crypto_aes256gcm = {
  "AES256-GCM",
  PTLS_AESGCM_CONFIDENTIALITY_LIMIT,
  PTLS_AESGCM_INTEGRITY_LIMIT,
  &quic_crypto_aes256ctr,
  &ptls_openssl_aes256ecb,
  PTLS_AES256_KEY_SIZE,
  PTLS_AESGCM_IV_SIZE,
  PTLS_AESGCM_TAG_SIZE,
  { PTLS_TLS12_AESGCM_FIXED_IV_SIZE, PTLS_TLS12_AESGCM_RECORD_IV_SIZE },
  1,
  PTLS_X86_CACHE_LINE_ALIGN_BITS,
  sizeof (struct aead_crypto_context_t),
  quic_crypto_aead_aes256gcm_setup_crypto
};

/* TLS 1.3 cipher suites exposed to quicly */
ptls_cipher_suite_t quic_crypto_aes128gcmsha256 = {
  PTLS_CIPHER_SUITE_AES_128_GCM_SHA256, &quic_crypto_aes128gcm,
  &ptls_openssl_sha256
};

ptls_cipher_suite_t quic_crypto_aes256gcmsha384 = {
  PTLS_CIPHER_SUITE_AES_256_GCM_SHA384, &quic_crypto_aes256gcm,
  &ptls_openssl_sha384
};

/* NULL-terminated list, preferred suite first */
ptls_cipher_suite_t *quic_crypto_cipher_suites[] = {
  &quic_crypto_aes256gcmsha384, &quic_crypto_aes128gcmsha256, NULL
};

/* quicly engine hooks: setup + encrypt; decryption is driven directly
 * by vpp through quic_crypto_decrypt_packet */
quicly_crypto_engine_t quic_crypto_engine = { quic_crypto_setup_cipher,
					      quic_crypto_encrypt_packet };
554 :
555 : /*
556 : * fd.io coding-style-patch-verification: ON
557 : *
558 : * Local Variables:
559 : * eval: (c-set-style "gnu")
560 : * End:
561 : */
|