/*
 * Copyright 2001-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low-level AES functions (which are deprecated for
 * non-internal use) in order to implement the EVP AES ciphers.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/aes_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;
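
/*
 * Note (added for clarity): "block" is the generic single-block AES
 * routine used as a fallback, while "stream" optionally holds a fused
 * multi-block fast path (CBC or CTR) when the platform provides one;
 * only one union member is meaningful for a given mode.
 */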

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODULE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif
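
/*
 * Note (added for clarity): the XTS key-setup routines below reject keys
 * whose two halves are identical, since that weakens XTS (Rogaway's
 * observation, referenced in the comments further down). In FIPS builds
 * the check applies to decryption as well; otherwise decryption is still
 * permitted so that legacy data encrypted under such a key stays readable.
 */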

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

#define MAXBITCHUNK     ((size_t)1<<(sizeof(size_t)*8-4))
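
/*
 * Note (added for clarity): MAXBITCHUNK is 1/16th of the size_t range
 * (2^60 on a 64-bit platform). The bit-oriented CFB1 path processes input
 * in chunks of at most this many bytes so that the bit count (len * 8)
 * can never overflow a size_t.
 */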

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
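
/*
 * Worked example (added for clarity): the counter is big-endian, so
 * incrementing { ..., 0x01, 0xff, 0xff } yields { ..., 0x02, 0x00, 0x00 }:
 * the carry propagates from counter[7] towards counter[0], and the loop
 * stops at the first byte that does not wrap to zero.
 */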

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                 gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    mode = EVP_CIPHER_CTX_get_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      ctx->iv, EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX, ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If the key is already set, use the IV; otherwise save a copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}
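
/*
 * Illustrative, non-normative caller-side sketch (added for clarity) of
 * how the EVP layer drives the GCM init/cipher hooks in this file; only
 * the public EVP functions below are real, the variable names are
 * hypothetical:
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_256_gcm(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, ivlen, NULL);
 *     EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv);
 *     EVP_EncryptUpdate(ctx, NULL, &outl, aad, aadlen);  (AAD only)
 *     EVP_EncryptUpdate(ctx, out, &outl, in, inlen);
 *     EVP_EncryptFinal_ex(ctx, out + outl, &tmpl);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 */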

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        /* The key is really two half-length keys */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}
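
/*
 * Note (added for clarity): the XTS key blob is laid out as key1 || key2,
 * each half of the EVP key length. key1 drives the data-unit cipher
 * (block1), while key2 is always an *encrypt* schedule (block2), because
 * the 16-byte tweak taken from the IV is only ever encrypted in XTS,
 * never decrypted.
 */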

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}
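
/*
 * Note (added for clarity): per RFC 3610, a CCM nonce occupies 15 - L
 * bytes, where L (2..8) is the size of the message-length field and M is
 * the tag length in bytes; both are configured via ctrl calls before the
 * IV is installed, which is why only 15 - L bytes are copied here.
 */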

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aesni_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is already set, use the IV; otherwise save a copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_init_key, \
        aesni_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

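/*
 * Illustrative (non-normative) expansion, added for clarity: an
 * instantiation such as
 *
 *     BLOCK_CIPHER_generic(NID_aes, 128, 16, 16, cbc, cbc, CBC, flags)
 *
 * defines two static EVP_CIPHER tables (AES-NI and generic) plus the
 * public accessor EVP_aes_128_cbc(), which picks the AES-NI table at
 * runtime when AESNI_CAPABLE is true.
 */
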
# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_##mode##_init_key, \
        aesni_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_get_mode(ctx);
    bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    if (bits <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;
    if (key) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If the key is already set, use the IV; otherwise save a copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is really two half-length keys */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is already set, use the IV; otherwise save a copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_init_key, \
        aes_t4_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_##mode##_init_key, \
        aes_t4_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;
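
/*
 * Note (added for clarity): on s390x the CPACF instructions (KM, KMO,
 * KMF, KMA, KMAC) take a function code ("fc") that selects the algorithm
 * and key size - here derived from the key length via S390X_AES_FC - plus
 * a parameter block holding the raw key and, for feedback modes, the
 * chaining value; the S390X_DECRYPT modifier bit in the function code
 * selects decryption.
 */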

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = ctx->num;
    int rem;

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    memcpy(iv, cctx->kmo.param.cv, ivlen);
    ctx->num = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;   /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = ctx->num;
    int rem;
    unsigned char tmp;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    memcpy(iv, cctx->kmf.param.cv, ivlen);
    ctx->num = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;   /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    memcpy(iv, cctx->kmf.param.cv, ivlen);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i)  ((((i) + 15) >> 4 << 4) + 16)
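
/*
 * Worked example (added for clarity): the macro rounds the IV length up
 * to a multiple of 16 and adds one 16-byte block for the encoded bit
 * lengths, e.g. S390X_gcm_ivpadlen(13) = 32 and S390X_gcm_ivpadlen(17) = 48.
 */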

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}
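
/*
 * Note (added for clarity): KMA consumes AAD in 16-byte blocks, so a
 * partial block accumulates in ctx->ares across calls and is flushed once
 * full; setting S390X_KMA_HS afterwards tells subsequent KMA invocations
 * that the hash subkey in the parameter block is already valid.
 */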

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}
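
/*
 * Note (added for clarity): this mirrors the J0 construction from NIST
 * SP 800-38D: a 96-bit IV becomes IV || 0^31 || 1 directly, while any
 * other length is zero-padded and GHASHed (together with the encoded IV
 * bit length, written during EVP_CTRL_AEAD_SET_IVLEN) to derive J0, from
 * which the 32-bit counter is seeded.
 */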

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_get_iv_length(c->cipher);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = c->iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != c->iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL)
                    return 0;
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * The invocation field will be at least 8 bytes in size, so there
         * is no need to check for wraparound or to increment more than the
         * last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);

        if (gctx->iv == c->iv) {
            gctx_out->iv = out->iv;
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL)
                return 0;

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }

        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many records as per FIPS 140-2 IG A.5 "Key/IV Pair
     * Uniqueness Requirements from SP 800-38D". The requirement is for one
     * party to the communication to fail after 2^64 - 1 records. We do this
     * on the encrypting side only.
     */
    if (enc && ++gctx->tls_enc_records == 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}
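
/*
 * Note (added for clarity): a TLS-GCM record processed here is laid out
 * as 8 bytes of explicit IV, then the ciphertext, then a 16-byte tag; the
 * function en/de-crypts in place and, on encryption, returns the full
 * record length including the explicit IV and the appended tag.
 */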

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_is_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);

    if (gctx == NULL)
        return 0;

    if (gctx->iv != c->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}
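
/*
 * Note (added for clarity): this builds the CCM B0 block: byte 0 holds
 * the flags, bytes 1 .. 15-l hold the nonce, and the trailing l bytes
 * hold the message length. Writing the 64-bit length into g[1] first and
 * letting the nonce memcpy overwrite its unused high-order bytes yields
 * exactly the l-byte big-endian length field required by RFC 3610.
 */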

/*-
 * Process additional authenticated data. Code is big-endian.
 */
static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
                              size_t alen)
{
    unsigned char *ptr;
    int i, rem;

    if (!alen)
        return;

    ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;

    /* Suppress 'type-punned pointer dereference' warning. */
    ptr = ctx->aes.ccm.buf.b;

    if (alen < ((1 << 16) - (1 << 8))) {
        *(uint16_t *)ptr = alen;
        i = 2;
    } else if (sizeof(alen) == 8
               && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
        *(uint16_t *)ptr = 0xffff;
        *(uint64_t *)(ptr + 2) = alen;
        i = 10;
    } else {
        *(uint16_t *)ptr = 0xfffe;
        *(uint32_t *)(ptr + 2) = alen;
        i = 6;
    }

    while (i < 16 && alen) {
        ctx->aes.ccm.buf.b[i] = *aad;
        ++aad;
        --alen;
        ++i;
    }
    while (i < 16) {
        ctx->aes.ccm.buf.b[i] = 0;
        ++i;
    }

    ctx->aes.ccm.kmac_param.icv.g[0] = 0;
    ctx->aes.ccm.kmac_param.icv.g[1] = 0;
    s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
               &ctx->aes.ccm.kmac_param);
    ctx->aes.ccm.blocks += 2;

    rem = alen & 0xf;
    alen &= ~(size_t)0xf;
    if (alen) {
        s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        ctx->aes.ccm.blocks += alen >> 4;
        aad += alen;
    }
    if (rem) {
        for (i = 0; i < rem; i++)
            ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];

        s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                 ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
}
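
/*
 * Note (added for clarity): the AAD length prefix above follows RFC 3610:
 * lengths below 0xff00 are encoded in 2 bytes, lengths needing more than
 * 32 bits as 0xffff plus an 8-byte value, and anything in between as
 * 0xfffe plus a 4-byte value; the first MAC pass then covers B0 and this
 * prefixed first AAD block (32 bytes), hence "blocks += 2".
 */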
1864
1865 /*-
1866 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
1867 * success.
1868 */
s390x_aes_ccm(S390X_AES_CCM_CTX * ctx,const unsigned char * in,unsigned char * out,size_t len,int enc)1869 static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
1870 unsigned char *out, size_t len, int enc)
1871 {
1872 size_t n, rem;
1873 unsigned int i, l, num;
1874 unsigned char flags;
1875
1876 flags = ctx->aes.ccm.nonce.b[0];
1877 if (!(flags & S390X_CCM_AAD_FLAG)) {
1878 s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
1879 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
1880 ctx->aes.ccm.blocks++;
1881 }
1882 l = flags & 0x7;
1883 ctx->aes.ccm.nonce.b[0] = l;
1884
1885 /*-
1886 * Reconstruct length from encoded length field
1887 * and initialize it with counter value.
1888 */
1889 n = 0;
1890 for (i = 15 - l; i < 15; i++) {
1891 n |= ctx->aes.ccm.nonce.b[i];
1892 ctx->aes.ccm.nonce.b[i] = 0;
1893 n <<= 8;
1894 }
1895 n |= ctx->aes.ccm.nonce.b[15];
1896 ctx->aes.ccm.nonce.b[15] = 1;
1897
1898 if (n != len)
1899 return -1; /* length mismatch */
1900
1901 if (enc) {
1902 /* Two operations per block plus one for tag encryption */
1903 ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
1904 if (ctx->aes.ccm.blocks > (1ULL << 61))
1905 return -2; /* too much data */
1906 }
1907
1908 num = 0;
1909 rem = len & 0xf;
1910 len &= ~(size_t)0xf;
1911
1912 if (enc) {
1913 /* mac-then-encrypt */
1914 if (len)
1915 s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
1916 if (rem) {
1917 for (i = 0; i < rem; i++)
1918 ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];
1919
1920 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
1921 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
1922 ctx->aes.ccm.kmac_param.k);
1923 }
1924
1925 CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
1926 ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
1927 &num, (ctr128_f)AES_ctr32_encrypt);
1928 } else {
1929 /* decrypt-then-mac */
1930 CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
1931 ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
1932 &num, (ctr128_f)AES_ctr32_encrypt);
1933
1934 if (len)
1935 s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
1936 if (rem) {
1937 for (i = 0; i < rem; i++)
1938 ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];
1939
1940 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
1941 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
1942 ctx->aes.ccm.kmac_param.k);
1943 }
1944 }
1945 /* encrypt tag */
1946 for (i = 15 - l; i < 16; i++)
1947 ctx->aes.ccm.nonce.b[i] = 0;
1948
1949 s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
1950 ctx->aes.ccm.kmac_param.k);
1951 ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
1952 ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];
1953
1954 ctx->aes.ccm.nonce.b[0] = flags; /* restore flags field */
1955 return 0;
1956 }
1957
1958 /*-
1959 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
1960 * if successful. Otherwise -1 is returned.
1961 */
1962 static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1963 const unsigned char *in, size_t len)
1964 {
1965 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
1966 unsigned char *ivec = ctx->iv;
1967 unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
1968 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
1969
1970 if (out != in
1971 || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
1972 return -1;
1973
1974 if (enc) {
1975 /* Set explicit iv (sequence number). */
1976 memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
1977 }
1978
1979 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
1980 /*-
1981 * Get explicit iv (sequence number). We already have fixed iv
1982 * (server/client_write_iv) here.
1983 */
1984 memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
1985 s390x_aes_ccm_setiv(cctx, ivec, len);
1986
1987 /* Process aad (sequence number|type|version|length) */
1988 s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);
1989
1990 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
1991 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
1992
1993 if (enc) {
1994 if (s390x_aes_ccm(cctx, in, out, len, enc))
1995 return -1;
1996
1997 memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
1998 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
1999 } else {
2000 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2001 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
2002 cctx->aes.ccm.m))
2003 return len;
2004 }
2005
2006 OPENSSL_cleanse(out, len);
2007 return -1;
2008 }
2009 }
2010
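/*-
 * For reference, the TLS record layout processed above is (with M the tag
 * length, 16 or 8 for the standard CCM and CCM_8 cipher suites):
 *
 *     in/out:  | explicit IV (8) | payload (len) | tag (M) |
 *
 * Encryption returns 8 + len + M bytes written; decryption returns just the
 * payload length, after the recomputed tag has been compared in constant
 * time against the trailing M bytes, wiping the output on mismatch.
 */
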
2011 /*-
2012 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
2013 * returned.
2014 */
2015 static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
2016 const unsigned char *key,
2017 const unsigned char *iv, int enc)
2018 {
2019 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2020 int keylen;
2021
2022 if (iv == NULL && key == NULL)
2023 return 1;
2024
2025 if (key != NULL) {
2026 keylen = EVP_CIPHER_CTX_get_key_length(ctx);
2027 if (keylen <= 0) {
2028 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2029 return 0;
2030 }
2031
2032 cctx->aes.ccm.fc = S390X_AES_FC(keylen);
2033 memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);
2034
2035 /* Store encoded m and l. */
2036 cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
2037 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
2038         memset(cctx->aes.ccm.nonce.b + 1, 0,
2039                sizeof(cctx->aes.ccm.nonce.b) - 1);
2040 cctx->aes.ccm.blocks = 0;
2041
2042 cctx->aes.ccm.key_set = 1;
2043 }
2044
2045 if (iv != NULL) {
2046 memcpy(ctx->iv, iv, 15 - cctx->aes.ccm.l);
2047
2048 cctx->aes.ccm.iv_set = 1;
2049 }
2050
2051 return 1;
2052 }
2053
2054 /*-
2055 * Called from EVP layer to initialize context, process additional
2056 * authenticated data, en/de-crypt plain/cipher-text and authenticate
2057 * plaintext or process a TLS packet, depending on context. Returns bytes
2058 * written on success. Otherwise -1 is returned.
2059 */
2060 static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2061 const unsigned char *in, size_t len)
2062 {
2063 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2064 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
2065 int rv;
2066 unsigned char *buf;
2067
2068 if (!cctx->aes.ccm.key_set)
2069 return -1;
2070
2071 if (cctx->aes.ccm.tls_aad_len >= 0)
2072 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
2073
2074 /*-
2075      * Final(): Does not return any data. Recall that CCM is mac-then-encrypt,
2076      * so integrity must already be checked at Update(), i.e. before
2077      * potentially corrupted data is output.
2078 */
2079 if (in == NULL && out != NULL)
2080 return 0;
2081
2082 if (!cctx->aes.ccm.iv_set)
2083 return -1;
2084
2085 if (out == NULL) {
2086 /* Update(): Pass message length. */
2087 if (in == NULL) {
2088 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2089
2090 cctx->aes.ccm.len_set = 1;
2091 return len;
2092 }
2093
2094 /* Update(): Process aad. */
2095 if (!cctx->aes.ccm.len_set && len)
2096 return -1;
2097
2098 s390x_aes_ccm_aad(cctx, in, len);
2099 return len;
2100 }
2101
2102 /* The tag must be set before actually decrypting data */
2103 if (!enc && !cctx->aes.ccm.tag_set)
2104 return -1;
2105
2106 /* Update(): Process message. */
2107
2108 if (!cctx->aes.ccm.len_set) {
2109 /*-
2110 * In case message length was not previously set explicitly via
2111 * Update(), set it now.
2112 */
2113 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2114
2115 cctx->aes.ccm.len_set = 1;
2116 }
2117
2118 if (enc) {
2119 if (s390x_aes_ccm(cctx, in, out, len, enc))
2120 return -1;
2121
2122 cctx->aes.ccm.tag_set = 1;
2123 return len;
2124 } else {
2125 rv = -1;
2126
2127 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2128 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2129 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2130 cctx->aes.ccm.m))
2131 rv = len;
2132 }
2133
2134 if (rv == -1)
2135 OPENSSL_cleanse(out, len);
2136
2137 cctx->aes.ccm.iv_set = 0;
2138 cctx->aes.ccm.tag_set = 0;
2139 cctx->aes.ccm.len_set = 0;
2140 return rv;
2141 }
2142 }
2143
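/*-
 * A sketch of the EVP-level call sequence that drives the cipher function
 * above (this mirrors the documented EVP CCM usage; error handling omitted):
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_128_ccm(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 16, NULL);
 *     EVP_EncryptInit_ex(ctx, NULL, NULL, key, nonce);
 *     EVP_EncryptUpdate(ctx, NULL, &outl, NULL, msglen); // message length
 *     EVP_EncryptUpdate(ctx, NULL, &outl, aad, aadlen);  // AAD
 *     EVP_EncryptUpdate(ctx, out, &outl, msg, msglen);   // encrypt + mac
 *     EVP_EncryptFinal_ex(ctx, out + outl, &outl);       // no output
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 */
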
2144 /*-
2145 * Performs various operations on the context structure depending on control
2146 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2147 * Code is big-endian.
2148 */
2149 static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2150 {
2151 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2152 unsigned char *buf;
2153 int enc, len;
2154
2155 switch (type) {
2156 case EVP_CTRL_INIT:
2157 cctx->aes.ccm.key_set = 0;
2158 cctx->aes.ccm.iv_set = 0;
2159 cctx->aes.ccm.l = 8;
2160 cctx->aes.ccm.m = 12;
2161 cctx->aes.ccm.tag_set = 0;
2162 cctx->aes.ccm.len_set = 0;
2163 cctx->aes.ccm.tls_aad_len = -1;
2164 return 1;
2165
2166 case EVP_CTRL_GET_IVLEN:
2167 *(int *)ptr = 15 - cctx->aes.ccm.l;
2168 return 1;
2169
2170 case EVP_CTRL_AEAD_TLS1_AAD:
2171 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2172 return 0;
2173
2174 /* Save the aad for later use. */
2175 buf = EVP_CIPHER_CTX_buf_noconst(c);
2176 memcpy(buf, ptr, arg);
2177 cctx->aes.ccm.tls_aad_len = arg;
2178
2179 len = buf[arg - 2] << 8 | buf[arg - 1];
2180 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2181 return 0;
2182
2183 /* Correct length for explicit iv. */
2184 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2185
2186 enc = EVP_CIPHER_CTX_is_encrypting(c);
2187 if (!enc) {
2188 if (len < cctx->aes.ccm.m)
2189 return 0;
2190
2191 /* Correct length for tag. */
2192 len -= cctx->aes.ccm.m;
2193 }
2194
2195 buf[arg - 2] = len >> 8;
2196 buf[arg - 1] = len & 0xff;
2197
2198 /* Extra padding: tag appended to record. */
2199 return cctx->aes.ccm.m;
2200
2201 case EVP_CTRL_CCM_SET_IV_FIXED:
2202 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2203 return 0;
2204
2205 /* Copy to first part of the iv. */
2206 memcpy(c->iv, ptr, arg);
2207 return 1;
2208
2209 case EVP_CTRL_AEAD_SET_IVLEN:
2210 arg = 15 - arg;
2211 /* fall-through */
2212
2213 case EVP_CTRL_CCM_SET_L:
2214 if (arg < 2 || arg > 8)
2215 return 0;
2216
2217 cctx->aes.ccm.l = arg;
2218 return 1;
2219
2220 case EVP_CTRL_AEAD_SET_TAG:
2221 if ((arg & 1) || arg < 4 || arg > 16)
2222 return 0;
2223
2224 enc = EVP_CIPHER_CTX_is_encrypting(c);
2225 if (enc && ptr)
2226 return 0;
2227
2228 if (ptr) {
2229 cctx->aes.ccm.tag_set = 1;
2230 buf = EVP_CIPHER_CTX_buf_noconst(c);
2231 memcpy(buf, ptr, arg);
2232 }
2233
2234 cctx->aes.ccm.m = arg;
2235 return 1;
2236
2237 case EVP_CTRL_AEAD_GET_TAG:
2238 enc = EVP_CIPHER_CTX_is_encrypting(c);
2239 if (!enc || !cctx->aes.ccm.tag_set)
2240 return 0;
2241
2242 if (arg < cctx->aes.ccm.m)
2243 return 0;
2244
2245 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2246 cctx->aes.ccm.tag_set = 0;
2247 cctx->aes.ccm.iv_set = 0;
2248 cctx->aes.ccm.len_set = 0;
2249 return 1;
2250
2251 case EVP_CTRL_COPY:
2252 return 1;
2253
2254 default:
2255 return -1;
2256 }
2257 }
2258
2259 # define s390x_aes_ccm_cleanup aes_ccm_cleanup
2260
2261 # ifndef OPENSSL_NO_OCB
2262 # define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2263
2264 # define s390x_aes_ocb_init_key aes_ocb_init_key
2265 static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2266 const unsigned char *iv, int enc);
2267 # define s390x_aes_ocb_cipher aes_ocb_cipher
2268 static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2269 const unsigned char *in, size_t len);
2270 # define s390x_aes_ocb_cleanup aes_ocb_cleanup
2271 static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2272 # define s390x_aes_ocb_ctrl aes_ocb_ctrl
2273 static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2274 # endif
2275
2276 # ifndef OPENSSL_NO_SIV
2277 # define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2278
2279 # define s390x_aes_siv_init_key aes_siv_init_key
2280 # define s390x_aes_siv_cipher aes_siv_cipher
2281 # define s390x_aes_siv_cleanup aes_siv_cleanup
2282 # define s390x_aes_siv_ctrl aes_siv_ctrl
2283 # endif
2284
2285 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2286 MODE,flags) \
2287 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2288 nid##_##keylen##_##nmode,blocksize, \
2289 keylen / 8, \
2290 ivlen, \
2291 flags | EVP_CIPH_##MODE##_MODE, \
2292 EVP_ORIG_GLOBAL, \
2293 s390x_aes_##mode##_init_key, \
2294 s390x_aes_##mode##_cipher, \
2295 NULL, \
2296 sizeof(S390X_AES_##MODE##_CTX), \
2297 NULL, \
2298 NULL, \
2299 NULL, \
2300 NULL \
2301 }; \
2302 static const EVP_CIPHER aes_##keylen##_##mode = { \
2303 nid##_##keylen##_##nmode, \
2304 blocksize, \
2305 keylen / 8, \
2306 ivlen, \
2307 flags | EVP_CIPH_##MODE##_MODE, \
2308 EVP_ORIG_GLOBAL, \
2309 aes_init_key, \
2310 aes_##mode##_cipher, \
2311 NULL, \
2312 sizeof(EVP_AES_KEY), \
2313 NULL, \
2314 NULL, \
2315 NULL, \
2316 NULL \
2317 }; \
2318 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2319 { \
2320 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2321 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2322 }
2323
2324 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2325 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2326 nid##_##keylen##_##mode, \
2327 blocksize, \
2328 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2329 ivlen, \
2330 flags | EVP_CIPH_##MODE##_MODE, \
2331 EVP_ORIG_GLOBAL, \
2332 s390x_aes_##mode##_init_key, \
2333 s390x_aes_##mode##_cipher, \
2334 s390x_aes_##mode##_cleanup, \
2335 sizeof(S390X_AES_##MODE##_CTX), \
2336 NULL, \
2337 NULL, \
2338 s390x_aes_##mode##_ctrl, \
2339 NULL \
2340 }; \
2341 static const EVP_CIPHER aes_##keylen##_##mode = { \
2342 nid##_##keylen##_##mode,blocksize, \
2343 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2344 ivlen, \
2345 flags | EVP_CIPH_##MODE##_MODE, \
2346 EVP_ORIG_GLOBAL, \
2347 aes_##mode##_init_key, \
2348 aes_##mode##_cipher, \
2349 aes_##mode##_cleanup, \
2350 sizeof(EVP_AES_##MODE##_CTX), \
2351 NULL, \
2352 NULL, \
2353 aes_##mode##_ctrl, \
2354 NULL \
2355 }; \
2356 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2357 { \
2358 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2359 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2360 }
2361
2362 #else
2363
2364 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2365 static const EVP_CIPHER aes_##keylen##_##mode = { \
2366 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2367 flags|EVP_CIPH_##MODE##_MODE, \
2368 EVP_ORIG_GLOBAL, \
2369 aes_init_key, \
2370 aes_##mode##_cipher, \
2371 NULL, \
2372 sizeof(EVP_AES_KEY), \
2373 NULL,NULL,NULL,NULL }; \
2374 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2375 { return &aes_##keylen##_##mode; }
2376
2377 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2378 static const EVP_CIPHER aes_##keylen##_##mode = { \
2379 nid##_##keylen##_##mode,blocksize, \
2380 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2381 ivlen, \
2382 flags|EVP_CIPH_##MODE##_MODE, \
2383 EVP_ORIG_GLOBAL, \
2384 aes_##mode##_init_key, \
2385 aes_##mode##_cipher, \
2386 aes_##mode##_cleanup, \
2387 sizeof(EVP_AES_##MODE##_CTX), \
2388 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2389 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2390 { return &aes_##keylen##_##mode; }
2391
2392 #endif
2393
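/*-
 * As an example of the macros above, BLOCK_CIPHER_generic(NID_aes, 128, 16,
 * 16, cbc, cbc, CBC, flags) expands (in the non-s390x branch) to roughly:
 *
 *     static const EVP_CIPHER aes_128_cbc = {
 *         NID_aes_128_cbc, 16, 16, 16,
 *         flags | EVP_CIPH_CBC_MODE, EVP_ORIG_GLOBAL,
 *         aes_init_key, aes_cbc_cipher, NULL, sizeof(EVP_AES_KEY),
 *         NULL, NULL, NULL, NULL
 *     };
 *     const EVP_CIPHER *EVP_aes_128_cbc(void) { return &aes_128_cbc; }
 */
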
2394 #define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2395 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2396 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2397 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2398 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2399 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2400 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2401 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
2402
2403 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2404 const unsigned char *iv, int enc)
2405 {
2406 int ret, mode;
2407 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2408 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2409
2410 if (keylen <= 0) {
2411 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2412 return 0;
2413 }
2414
2415 mode = EVP_CIPHER_CTX_get_mode(ctx);
2416 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2417 && !enc) {
2418 #ifdef HWAES_CAPABLE
2419 if (HWAES_CAPABLE) {
2420 ret = HWAES_set_decrypt_key(key, keylen, &dat->ks.ks);
2421 dat->block = (block128_f) HWAES_decrypt;
2422 dat->stream.cbc = NULL;
2423 # ifdef HWAES_cbc_encrypt
2424 if (mode == EVP_CIPH_CBC_MODE)
2425 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2426 # endif
2427 } else
2428 #endif
2429 #ifdef BSAES_CAPABLE
2430 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2431 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2432 dat->block = (block128_f) AES_decrypt;
2433 dat->stream.cbc = (cbc128_f) ossl_bsaes_cbc_encrypt;
2434 } else
2435 #endif
2436 #ifdef VPAES_CAPABLE
2437 if (VPAES_CAPABLE) {
2438 ret = vpaes_set_decrypt_key(key, keylen, &dat->ks.ks);
2439 dat->block = (block128_f) vpaes_decrypt;
2440 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2441 (cbc128_f) vpaes_cbc_encrypt : NULL;
2442 } else
2443 #endif
2444 {
2445 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2446 dat->block = (block128_f) AES_decrypt;
2447 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2448 (cbc128_f) AES_cbc_encrypt : NULL;
2449 }
2450 } else
2451 #ifdef HWAES_CAPABLE
2452 if (HWAES_CAPABLE) {
2453 ret = HWAES_set_encrypt_key(key, keylen, &dat->ks.ks);
2454 dat->block = (block128_f) HWAES_encrypt;
2455 dat->stream.cbc = NULL;
2456 # ifdef HWAES_cbc_encrypt
2457 if (mode == EVP_CIPH_CBC_MODE)
2458 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2459 else
2460 # endif
2461 # ifdef HWAES_ctr32_encrypt_blocks
2462 if (mode == EVP_CIPH_CTR_MODE)
2463 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2464 else
2465 # endif
2466 (void)0; /* terminate potentially open 'else' */
2467 } else
2468 #endif
2469 #ifdef BSAES_CAPABLE
2470 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2471 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2472 dat->block = (block128_f) AES_encrypt;
2473 dat->stream.ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2474 } else
2475 #endif
2476 #ifdef VPAES_CAPABLE
2477 if (VPAES_CAPABLE) {
2478 ret = vpaes_set_encrypt_key(key, keylen, &dat->ks.ks);
2479 dat->block = (block128_f) vpaes_encrypt;
2480 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2481 (cbc128_f) vpaes_cbc_encrypt : NULL;
2482 } else
2483 #endif
2484 {
2485 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2486 dat->block = (block128_f) AES_encrypt;
2487 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2488 (cbc128_f) AES_cbc_encrypt : NULL;
2489 #ifdef AES_CTR_ASM
2490 if (mode == EVP_CIPH_CTR_MODE)
2491 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2492 #endif
2493 }
2494
2495 if (ret < 0) {
2496 ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
2497 return 0;
2498 }
2499
2500 return 1;
2501 }
2502
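/*-
 * Note on the shape of aes_init_key() above: the #ifdef sections form a
 * single if/else cascade across preprocessor boundaries, in essence
 *
 *     #ifdef HWAES_CAPABLE
 *         if (HWAES_CAPABLE) { ... hardware path ... } else
 *     #endif
 *         { ... software fallback ... }
 *
 * which is why some branches end in a bare '(void)0;' statement: it closes
 * an 'else' that may or may not have been opened, depending on the build.
 */
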
2503 static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2504 const unsigned char *in, size_t len)
2505 {
2506 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2507
2508 if (dat->stream.cbc)
2509 (*dat->stream.cbc) (in, out, len, &dat->ks, ctx->iv,
2510 EVP_CIPHER_CTX_is_encrypting(ctx));
2511 else if (EVP_CIPHER_CTX_is_encrypting(ctx))
2512 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv,
2513 dat->block);
2514 else
2515 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2516 ctx->iv, dat->block);
2517
2518 return 1;
2519 }
2520
2521 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2522 const unsigned char *in, size_t len)
2523 {
2524 size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);
2525 size_t i;
2526 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2527
2528 if (len < bl)
2529 return 1;
2530
2531 for (i = 0, len -= bl; i <= len; i += bl)
2532 (*dat->block) (in + i, out + i, &dat->ks);
2533
2534 return 1;
2535 }
2536
2537 static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2538 const unsigned char *in, size_t len)
2539 {
2540 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2541
2542 int num = EVP_CIPHER_CTX_get_num(ctx);
2543 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2544 ctx->iv, &num, dat->block);
2545 EVP_CIPHER_CTX_set_num(ctx, num);
2546 return 1;
2547 }
2548
2549 static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2550 const unsigned char *in, size_t len)
2551 {
2552 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2553
2554 int num = EVP_CIPHER_CTX_get_num(ctx);
2555 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2556 ctx->iv, &num,
2557 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2558 EVP_CIPHER_CTX_set_num(ctx, num);
2559 return 1;
2560 }
2561
2562 static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2563 const unsigned char *in, size_t len)
2564 {
2565 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2566
2567 int num = EVP_CIPHER_CTX_get_num(ctx);
2568 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2569 ctx->iv, &num,
2570 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2571 EVP_CIPHER_CTX_set_num(ctx, num);
2572 return 1;
2573 }
2574
2575 static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2576 const unsigned char *in, size_t len)
2577 {
2578 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2579
2580 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2581 int num = EVP_CIPHER_CTX_get_num(ctx);
2582 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2583 ctx->iv, &num,
2584 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2585 EVP_CIPHER_CTX_set_num(ctx, num);
2586 return 1;
2587 }
2588
2589 while (len >= MAXBITCHUNK) {
2590 int num = EVP_CIPHER_CTX_get_num(ctx);
2591 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2592 ctx->iv, &num,
2593 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2594 EVP_CIPHER_CTX_set_num(ctx, num);
2595 len -= MAXBITCHUNK;
2596 out += MAXBITCHUNK;
2597 in += MAXBITCHUNK;
2598 }
2599 if (len) {
2600 int num = EVP_CIPHER_CTX_get_num(ctx);
2601 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2602 ctx->iv, &num,
2603 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2604 EVP_CIPHER_CTX_set_num(ctx, num);
2605 }
2606
2607 return 1;
2608 }
2609
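/*-
 * The chunking above is arithmetic, not cryptographic:
 * CRYPTO_cfb128_1_encrypt() takes its length in bits, so 'len * 8' must not
 * overflow size_t. Capping each call at MAXBITCHUNK =
 * 2^(8 * sizeof(size_t) - 4) bytes keeps the bit count at
 * 2^(8 * sizeof(size_t) - 1); on a 64-bit platform that is at most 2^60
 * bytes, i.e. 2^63 bits, per call.
 */
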
2610 static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2611 const unsigned char *in, size_t len)
2612 {
2613 int n = EVP_CIPHER_CTX_get_num(ctx);
2614 unsigned int num;
2615 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2616
2617 if (n < 0)
2618 return 0;
2619 num = (unsigned int)n;
2620
2621 if (dat->stream.ctr)
2622 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2623 ctx->iv,
2624 EVP_CIPHER_CTX_buf_noconst(ctx),
2625 &num, dat->stream.ctr);
2626 else
2627 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2628 ctx->iv,
2629 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2630 dat->block);
2631 EVP_CIPHER_CTX_set_num(ctx, num);
2632 return 1;
2633 }
2634
2635 BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2636 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2637 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2638
2639 static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2640 {
2641 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2642 if (gctx == NULL)
2643 return 0;
2644 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2645 if (gctx->iv != c->iv)
2646 OPENSSL_free(gctx->iv);
2647 return 1;
2648 }
2649
2650 static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2651 {
2652 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2653 switch (type) {
2654 case EVP_CTRL_INIT:
2655 gctx->key_set = 0;
2656 gctx->iv_set = 0;
2657 gctx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
2658 gctx->iv = c->iv;
2659 gctx->taglen = -1;
2660 gctx->iv_gen = 0;
2661 gctx->tls_aad_len = -1;
2662 return 1;
2663
2664 case EVP_CTRL_GET_IVLEN:
2665 *(int *)ptr = gctx->ivlen;
2666 return 1;
2667
2668 case EVP_CTRL_AEAD_SET_IVLEN:
2669 if (arg <= 0)
2670 return 0;
2671 /* Allocate memory for IV if needed */
2672 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2673 if (gctx->iv != c->iv)
2674 OPENSSL_free(gctx->iv);
2675 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL)
2676 return 0;
2677 }
2678 gctx->ivlen = arg;
2679 return 1;
2680
2681 case EVP_CTRL_AEAD_SET_TAG:
2682 if (arg <= 0 || arg > 16 || c->encrypt)
2683 return 0;
2684 memcpy(c->buf, ptr, arg);
2685 gctx->taglen = arg;
2686 return 1;
2687
2688 case EVP_CTRL_AEAD_GET_TAG:
2689 if (arg <= 0 || arg > 16 || !c->encrypt
2690 || gctx->taglen < 0)
2691 return 0;
2692 memcpy(ptr, c->buf, arg);
2693 return 1;
2694
2695 case EVP_CTRL_GCM_SET_IV_FIXED:
2696 /* Special case: -1 length restores whole IV */
2697 if (arg == -1) {
2698 memcpy(gctx->iv, ptr, gctx->ivlen);
2699 gctx->iv_gen = 1;
2700 return 1;
2701 }
2702 /*
2703 * Fixed field must be at least 4 bytes and invocation field at least
2704 * 8.
2705 */
2706 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2707 return 0;
2708 if (arg)
2709 memcpy(gctx->iv, ptr, arg);
2710 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2711 return 0;
2712 gctx->iv_gen = 1;
2713 return 1;
2714
2715 case EVP_CTRL_GCM_IV_GEN:
2716 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2717 return 0;
2718 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2719 if (arg <= 0 || arg > gctx->ivlen)
2720 arg = gctx->ivlen;
2721 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2722 /*
2723          * The invocation field will be at least 8 bytes in size, so there
2724          * is no need to check for wraparound or to increment more than the
2725          * last 8 bytes.
2725 */
2726 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2727 gctx->iv_set = 1;
2728 return 1;
2729
2730 case EVP_CTRL_GCM_SET_IV_INV:
2731 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2732 return 0;
2733 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2734 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2735 gctx->iv_set = 1;
2736 return 1;
2737
2738 case EVP_CTRL_AEAD_TLS1_AAD:
2739 /* Save the AAD for later use */
2740 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2741 return 0;
2742 memcpy(c->buf, ptr, arg);
2743 gctx->tls_aad_len = arg;
2744 gctx->tls_enc_records = 0;
2745 {
2746 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2747 /* Correct length for explicit IV */
2748 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2749 return 0;
2750 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2751 /* If decrypting correct for tag too */
2752 if (!c->encrypt) {
2753 if (len < EVP_GCM_TLS_TAG_LEN)
2754 return 0;
2755 len -= EVP_GCM_TLS_TAG_LEN;
2756 }
2757 c->buf[arg - 2] = len >> 8;
2758 c->buf[arg - 1] = len & 0xff;
2759 }
2760 /* Extra padding: tag appended to record */
2761 return EVP_GCM_TLS_TAG_LEN;
2762
2763 case EVP_CTRL_COPY:
2764 {
2765 EVP_CIPHER_CTX *out = ptr;
2766 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2767 if (gctx->gcm.key) {
2768 if (gctx->gcm.key != &gctx->ks)
2769 return 0;
2770 gctx_out->gcm.key = &gctx_out->ks;
2771 }
2772 if (gctx->iv == c->iv)
2773 gctx_out->iv = out->iv;
2774 else {
2775 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL)
2776 return 0;
2777 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2778 }
2779 return 1;
2780 }
2781
2782 default:
2783 return -1;
2784
2785 }
2786 }
2787
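/*-
 * Typical applications reach the ctrls above through the standard EVP API,
 * for example (error handling omitted):
 *
 *     // a non-default IV length must be set before the IV itself
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, 16, NULL);
 *     // encrypt: fetch the tag after EVP_EncryptFinal_ex()
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 *     // decrypt: supply the expected tag before EVP_DecryptFinal_ex()
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 16, tag);
 */
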
2788 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2789 const unsigned char *iv, int enc)
2790 {
2791 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2792
2793 if (iv == NULL && key == NULL)
2794 return 1;
2795
2796 if (key != NULL) {
2797 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2798
2799 if (keylen <= 0) {
2800 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2801 return 0;
2802 }
2803 do {
2804 #ifdef HWAES_CAPABLE
2805 if (HWAES_CAPABLE) {
2806 HWAES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2807 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2808 (block128_f) HWAES_encrypt);
2809 # ifdef HWAES_ctr32_encrypt_blocks
2810 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2811 # else
2812 gctx->ctr = NULL;
2813 # endif
2814 break;
2815 } else
2816 #endif
2817 #ifdef BSAES_CAPABLE
2818 if (BSAES_CAPABLE) {
2819 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2820 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2821 (block128_f) AES_encrypt);
2822 gctx->ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2823 break;
2824 } else
2825 #endif
2826 #ifdef VPAES_CAPABLE
2827 if (VPAES_CAPABLE) {
2828 vpaes_set_encrypt_key(key, keylen, &gctx->ks.ks);
2829 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2830 (block128_f) vpaes_encrypt);
2831 gctx->ctr = NULL;
2832 break;
2833 } else
2834 #endif
2835 (void)0; /* terminate potentially open 'else' */
2836
2837 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2838 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2839 (block128_f) AES_encrypt);
2840 #ifdef AES_CTR_ASM
2841 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2842 #else
2843 gctx->ctr = NULL;
2844 #endif
2845 } while (0);
2846
2847 /*
2848          * If we have an IV, set it directly; otherwise use the saved IV.
2849 */
2850 if (iv == NULL && gctx->iv_set)
2851 iv = gctx->iv;
2852 if (iv) {
2853 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2854 gctx->iv_set = 1;
2855 }
2856 gctx->key_set = 1;
2857 } else {
2858 /* If key set use IV, otherwise copy */
2859 if (gctx->key_set)
2860 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2861 else
2862 memcpy(gctx->iv, iv, gctx->ivlen);
2863 gctx->iv_set = 1;
2864 gctx->iv_gen = 0;
2865 }
2866 return 1;
2867 }
2868
2869 /*
2870 * Handle TLS GCM packet format. This consists of the last portion of the IV
2871 * followed by the payload and finally the tag. On encrypt generate IV,
2872 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2873 * and verify tag.
2874 */
2875
2876 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2877 const unsigned char *in, size_t len)
2878 {
2879 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2880 int rv = -1;
2881 /* Encrypt/decrypt must be performed in place */
2882 if (out != in
2883 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2884 return -1;
2885
2886 /*
2887 * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
2888      * Requirements from SP 800-38D". The requirement is for one party to the
2889      * communication to fail after 2^64 - 1 records. We do this on the encrypting
2890 * side only.
2891 */
2892 if (EVP_CIPHER_CTX_is_encrypting(ctx) && ++gctx->tls_enc_records == 0) {
2893 ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
2894 goto err;
2895 }
2896
2897 /*
2898 * Set IV from start of buffer or generate IV and write to start of
2899 * buffer.
2900 */
2901 if (EVP_CIPHER_CTX_ctrl(ctx,
2902 EVP_CIPHER_CTX_is_encrypting(ctx) ?
2903 EVP_CTRL_GCM_IV_GEN : EVP_CTRL_GCM_SET_IV_INV,
2904 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2905 goto err;
2906 /* Use saved AAD */
2907 if (CRYPTO_gcm128_aad(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx),
2908 gctx->tls_aad_len))
2909 goto err;
2910 /* Fix buffer and length to point to payload */
2911 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2912 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2913 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2914 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
2915 /* Encrypt payload */
2916 if (gctx->ctr) {
2917 size_t bulk = 0;
2918 #if defined(AES_GCM_ASM)
2919 if (len >= 32 && AES_GCM_ASM(gctx)) {
2920 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2921 return -1;
2922
2923 bulk = AES_gcm_encrypt(in, out, len,
2924 gctx->gcm.key,
2925 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2926 gctx->gcm.len.u[1] += bulk;
2927 }
2928 #endif
2929 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2930 in + bulk,
2931 out + bulk,
2932 len - bulk, gctx->ctr))
2933 goto err;
2934 } else {
2935 size_t bulk = 0;
2936 #if defined(AES_GCM_ASM2)
2937 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2938 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2939 return -1;
2940
2941 bulk = AES_gcm_encrypt(in, out, len,
2942 gctx->gcm.key,
2943 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2944 gctx->gcm.len.u[1] += bulk;
2945 }
2946 #endif
2947 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2948 in + bulk, out + bulk, len - bulk))
2949 goto err;
2950 }
2951 out += len;
2952 /* Finally write tag */
2953 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2954 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2955 } else {
2956 /* Decrypt */
2957 if (gctx->ctr) {
2958 size_t bulk = 0;
2959 #if defined(AES_GCM_ASM)
2960 if (len >= 16 && AES_GCM_ASM(gctx)) {
2961 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2962 return -1;
2963
2964 bulk = AES_gcm_decrypt(in, out, len,
2965 gctx->gcm.key,
2966 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2967 gctx->gcm.len.u[1] += bulk;
2968 }
2969 #endif
2970 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2971 in + bulk,
2972 out + bulk,
2973 len - bulk, gctx->ctr))
2974 goto err;
2975 } else {
2976 size_t bulk = 0;
2977 #if defined(AES_GCM_ASM2)
2978 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2979 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2980 return -1;
2981
2982 bulk = AES_gcm_decrypt(in, out, len,
2983 gctx->gcm.key,
2984 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2985 gctx->gcm.len.u[1] += bulk;
2986 }
2987 #endif
2988 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
2989 in + bulk, out + bulk, len - bulk))
2990 goto err;
2991 }
2992 /* Retrieve tag */
2993 CRYPTO_gcm128_tag(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx),
2994 EVP_GCM_TLS_TAG_LEN);
2995 /* If tag mismatch wipe buffer */
2996 if (CRYPTO_memcmp(EVP_CIPHER_CTX_buf_noconst(ctx), in + len,
2997 EVP_GCM_TLS_TAG_LEN)) {
2998 OPENSSL_cleanse(out, len);
2999 goto err;
3000 }
3001 rv = len;
3002 }
3003
3004 err:
3005 gctx->iv_set = 0;
3006 gctx->tls_aad_len = -1;
3007 return rv;
3008 }
3009
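/*-
 * For reference, the record layout handled by aes_gcm_tls_cipher() is
 *
 *     in/out:  | explicit IV (8) | payload (len) | tag (16) |
 *
 * so the encrypt path returns len + 8 + 16 bytes written, and the decrypt
 * path returns only the payload length, wiping the output if the tag check
 * fails.
 */
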
3010 #ifdef FIPS_MODULE
3011 /*
3012  * See SP800-38D (GCM) Section 8 "Uniqueness Requirement on IVs and Keys"
3013 *
3014 * See also 8.2.2 RBG-based construction.
3015 * Random construction consists of a free field (which can be NULL) and a
3016 * random field which will use a DRBG that can return at least 96 bits of
3017 * entropy strength. (The DRBG must be seeded by the FIPS module).
3018 */
3019 static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
3020 {
3021 int sz = gctx->ivlen - offset;
3022
3023 /* Must be at least 96 bits */
3024 if (sz <= 0 || gctx->ivlen < 12)
3025 return 0;
3026
3027 /* Use DRBG to generate random iv */
3028 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
3029 return 0;
3030 return 1;
3031 }
3032 #endif /* FIPS_MODULE */
3033
3034 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3035 const unsigned char *in, size_t len)
3036 {
3037 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3038
3039 /* If not set up, return error */
3040 if (!gctx->key_set)
3041 return -1;
3042
3043 if (gctx->tls_aad_len >= 0)
3044 return aes_gcm_tls_cipher(ctx, out, in, len);
3045
3046 #ifdef FIPS_MODULE
3047 /*
3048      * FIPS requires generation of AES-GCM IVs inside the FIPS module.
3049 * The IV can still be set externally (the security policy will state that
3050 * this is not FIPS compliant). There are some applications
3051 * where setting the IV externally is the only option available.
3052 */
3053 if (!gctx->iv_set) {
3054 if (!EVP_CIPHER_CTX_is_encrypting(ctx) || !aes_gcm_iv_generate(gctx, 0))
3055 return -1;
3056 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
3057 gctx->iv_set = 1;
3058 gctx->iv_gen_rand = 1;
3059 }
3060 #else
3061 if (!gctx->iv_set)
3062 return -1;
3063 #endif /* FIPS_MODULE */
3064
3065 if (in) {
3066 if (out == NULL) {
3067 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
3068 return -1;
3069 } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3070 if (gctx->ctr) {
3071 size_t bulk = 0;
3072 #if defined(AES_GCM_ASM)
3073 if (len >= 32 && AES_GCM_ASM(gctx)) {
3074 size_t res = (16 - gctx->gcm.mres) % 16;
3075
3076 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3077 return -1;
3078
3079 bulk = AES_gcm_encrypt(in + res,
3080 out + res, len - res,
3081 gctx->gcm.key, gctx->gcm.Yi.c,
3082 gctx->gcm.Xi.u);
3083 gctx->gcm.len.u[1] += bulk;
3084 bulk += res;
3085 }
3086 #endif
3087 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
3088 in + bulk,
3089 out + bulk,
3090 len - bulk, gctx->ctr))
3091 return -1;
3092 } else {
3093 size_t bulk = 0;
3094 #if defined(AES_GCM_ASM2)
3095 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3096 size_t res = (16 - gctx->gcm.mres) % 16;
3097
3098 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3099 return -1;
3100
3101 bulk = AES_gcm_encrypt(in + res,
3102 out + res, len - res,
3103 gctx->gcm.key, gctx->gcm.Yi.c,
3104 gctx->gcm.Xi.u);
3105 gctx->gcm.len.u[1] += bulk;
3106 bulk += res;
3107 }
3108 #endif
3109 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3110 in + bulk, out + bulk, len - bulk))
3111 return -1;
3112 }
3113 } else {
3114 if (gctx->ctr) {
3115 size_t bulk = 0;
3116 #if defined(AES_GCM_ASM)
3117 if (len >= 16 && AES_GCM_ASM(gctx)) {
3118 size_t res = (16 - gctx->gcm.mres) % 16;
3119
3120 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3121 return -1;
3122
3123 bulk = AES_gcm_decrypt(in + res,
3124 out + res, len - res,
3125 gctx->gcm.key,
3126 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3127 gctx->gcm.len.u[1] += bulk;
3128 bulk += res;
3129 }
3130 #endif
3131 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3132 in + bulk,
3133 out + bulk,
3134 len - bulk, gctx->ctr))
3135 return -1;
3136 } else {
3137 size_t bulk = 0;
3138 #if defined(AES_GCM_ASM2)
3139 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3140 size_t res = (16 - gctx->gcm.mres) % 16;
3141
3142 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3143 return -1;
3144
3145 bulk = AES_gcm_decrypt(in + res,
3146 out + res, len - res,
3147 gctx->gcm.key,
3148 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3149 gctx->gcm.len.u[1] += bulk;
3150 bulk += res;
3151 }
3152 #endif
3153 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3154 in + bulk, out + bulk, len - bulk))
3155 return -1;
3156 }
3157 }
3158 return len;
3159 } else {
3160 if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
3161 if (gctx->taglen < 0)
3162 return -1;
3163 if (CRYPTO_gcm128_finish(&gctx->gcm,
3164 EVP_CIPHER_CTX_buf_noconst(ctx),
3165 gctx->taglen) != 0)
3166 return -1;
3167 gctx->iv_set = 0;
3168 return 0;
3169 }
3170 CRYPTO_gcm128_tag(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx), 16);
3171 gctx->taglen = 16;
3172 /* Don't reuse the IV */
3173 gctx->iv_set = 0;
3174 return 0;
3175 }
3176
3177 }
3178
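/*-
 * A minimal sketch of driving the GCM implementation above through the
 * public EVP interface (default 96-bit IV; error handling omitted):
 *
 *     EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *     EVP_EncryptInit_ex(c, EVP_aes_256_gcm(), NULL, key, iv);
 *     EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen); // out == NULL: AAD
 *     EVP_EncryptUpdate(c, ct, &outl, pt, ptlen);     // encrypt payload
 *     EVP_EncryptFinal_ex(c, ct + outl, &tmpl);       // finalises the tag
 *     EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 *     EVP_CIPHER_CTX_free(c);
 */
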
3179 #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3180 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3181 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3182 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3183
3184 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3185 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3186 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3187 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3188 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3189 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3190
3191 static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3192 {
3193 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3194
3195 if (type == EVP_CTRL_COPY) {
3196 EVP_CIPHER_CTX *out = ptr;
3197 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3198
3199 if (xctx->xts.key1) {
3200 if (xctx->xts.key1 != &xctx->ks1)
3201 return 0;
3202 xctx_out->xts.key1 = &xctx_out->ks1;
3203 }
3204 if (xctx->xts.key2) {
3205 if (xctx->xts.key2 != &xctx->ks2)
3206 return 0;
3207 xctx_out->xts.key2 = &xctx_out->ks2;
3208 }
3209 return 1;
3210 } else if (type != EVP_CTRL_INIT)
3211 return -1;
3212     /* key1 and key2 are used as an indicator that both key and IV are set */
3213 xctx->xts.key1 = NULL;
3214 xctx->xts.key2 = NULL;
3215 return 1;
3216 }
3217
3218 static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3219 const unsigned char *iv, int enc)
3220 {
3221 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3222
3223 if (iv == NULL && key == NULL)
3224 return 1;
3225
3226 if (key != NULL) {
3227 do {
3228             /* The key is in reality two half-length keys */
3229 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
3230 const int bytes = keylen / 2;
3231 const int bits = bytes * 8;
3232
3233 if (keylen <= 0) {
3234 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3235 return 0;
3236 }
3237 /*
3238 * Verify that the two keys are different.
3239 *
3240 * This addresses the vulnerability described in Rogaway's
3241 * September 2004 paper:
3242 *
3243 * "Efficient Instantiations of Tweakable Blockciphers and
3244 * Refinements to Modes OCB and PMAC".
3245 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3246 *
3247 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3248 * that:
3249 * "The check for Key_1 != Key_2 shall be done at any place
3250 * BEFORE using the keys in the XTS-AES algorithm to process
3251 * data with them."
3252 */
3253 if ((!allow_insecure_decrypt || enc)
3254 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3255 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
3256 return 0;
3257 }
3258
3259 #ifdef AES_XTS_ASM
3260 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3261 #else
3262 xctx->stream = NULL;
3263 #endif
3264 /* key_len is two AES keys */
3265 #ifdef HWAES_CAPABLE
3266 if (HWAES_CAPABLE) {
3267 if (enc) {
3268 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3269 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3270 # ifdef HWAES_xts_encrypt
3271 xctx->stream = HWAES_xts_encrypt;
3272 # endif
3273 } else {
3274 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3275 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3276 # ifdef HWAES_xts_decrypt
3277 xctx->stream = HWAES_xts_decrypt;
3278 # endif
3279 }
3280
3281 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3282 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3283
3284 xctx->xts.key1 = &xctx->ks1;
3285 break;
3286 } else
3287 #endif
3288 #ifdef BSAES_CAPABLE
3289 if (BSAES_CAPABLE)
3290 xctx->stream = enc ? ossl_bsaes_xts_encrypt : ossl_bsaes_xts_decrypt;
3291 else
3292 #endif
3293 #ifdef VPAES_CAPABLE
3294 if (VPAES_CAPABLE) {
3295 if (enc) {
3296 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3297 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3298 } else {
3299 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3300 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3301 }
3302
3303 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3304 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3305
3306 xctx->xts.key1 = &xctx->ks1;
3307 break;
3308 } else
3309 #endif
3310 (void)0; /* terminate potentially open 'else' */
3311
3312 if (enc) {
3313 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3314 xctx->xts.block1 = (block128_f) AES_encrypt;
3315 } else {
3316 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3317 xctx->xts.block1 = (block128_f) AES_decrypt;
3318 }
3319
3320 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3321 xctx->xts.block2 = (block128_f) AES_encrypt;
3322
3323 xctx->xts.key1 = &xctx->ks1;
3324 } while (0);
3325 }
3326
3327 if (iv) {
3328 xctx->xts.key2 = &xctx->ks2;
3329 memcpy(ctx->iv, iv, 16);
3330 }
3331
3332 return 1;
3333 }
3334
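/*-
 * Note that the XTS "key" here is two AES keys concatenated, so e.g.
 * EVP_aes_256_xts() reports a 64-byte key: bytes 0..31 feed the data-unit
 * (block1) schedule and bytes 32..63 the tweak (block2) schedule. The
 * CRYPTO_memcmp() check above therefore rejects a 64-byte key whose two
 * 32-byte halves are identical.
 */
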
3335 static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3336 const unsigned char *in, size_t len)
3337 {
3338 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3339
3340 if (xctx->xts.key1 == NULL
3341 || xctx->xts.key2 == NULL
3342 || out == NULL
3343 || in == NULL
3344 || len < AES_BLOCK_SIZE)
3345 return 0;
3346
3347 /*
3348 * Impose a limit of 2^20 blocks per data unit as specified by
3349 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3350 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3351 * NIST SP 800-38E mandates the same limit.
3352 */
3353 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3354 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3355 return 0;
3356 }
3357
3358 if (xctx->stream)
3359 (*xctx->stream) (in, out, len,
3360 xctx->xts.key1, xctx->xts.key2,
3361 ctx->iv);
3362 else if (CRYPTO_xts128_encrypt(&xctx->xts, ctx->iv, in, out, len,
3363 EVP_CIPHER_CTX_is_encrypting(ctx)))
3364 return 0;
3365 return 1;
3366 }
3367
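/*-
 * In concrete terms the limit enforced above is 2^20 blocks * 16 bytes per
 * block = 16 MiB per data unit; larger streams must be split into multiple
 * data units, each with its own tweak (IV).
 */
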
3368 #define aes_xts_cleanup NULL
3369
3370 #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3371 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3372 | EVP_CIPH_CUSTOM_COPY)
3373
3374 BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3375 BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3376
3377 static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3378 {
3379 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3380 switch (type) {
3381 case EVP_CTRL_INIT:
3382 cctx->key_set = 0;
3383 cctx->iv_set = 0;
3384 cctx->L = 8;
3385 cctx->M = 12;
3386 cctx->tag_set = 0;
3387 cctx->len_set = 0;
3388 cctx->tls_aad_len = -1;
3389 return 1;
3390
3391 case EVP_CTRL_GET_IVLEN:
3392 *(int *)ptr = 15 - cctx->L;
3393 return 1;
3394
3395 case EVP_CTRL_AEAD_TLS1_AAD:
3396 /* Save the AAD for later use */
3397 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3398 return 0;
3399 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3400 cctx->tls_aad_len = arg;
3401 {
3402 uint16_t len =
3403 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3404 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3405 /* Correct length for explicit IV */
3406 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3407 return 0;
3408 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3409 /* If decrypting correct for tag too */
3410 if (!EVP_CIPHER_CTX_is_encrypting(c)) {
3411 if (len < cctx->M)
3412 return 0;
3413 len -= cctx->M;
3414 }
3415 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3416 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3417 }
3418 /* Extra padding: tag appended to record */
3419 return cctx->M;
3420
3421 case EVP_CTRL_CCM_SET_IV_FIXED:
3422 /* Sanity check length */
3423 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3424 return 0;
3425 /* Just copy to first part of IV */
3426 memcpy(c->iv, ptr, arg);
3427 return 1;
3428
3429 case EVP_CTRL_AEAD_SET_IVLEN:
3430 arg = 15 - arg;
3431 /* fall through */
3432 case EVP_CTRL_CCM_SET_L:
3433 if (arg < 2 || arg > 8)
3434 return 0;
3435 cctx->L = arg;
3436 return 1;
3437
3438 case EVP_CTRL_AEAD_SET_TAG:
3439 if ((arg & 1) || arg < 4 || arg > 16)
3440 return 0;
3441 if (EVP_CIPHER_CTX_is_encrypting(c) && ptr)
3442 return 0;
3443 if (ptr) {
3444 cctx->tag_set = 1;
3445 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3446 }
3447 cctx->M = arg;
3448 return 1;
3449
3450 case EVP_CTRL_AEAD_GET_TAG:
3451 if (!EVP_CIPHER_CTX_is_encrypting(c) || !cctx->tag_set)
3452 return 0;
3453 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3454 return 0;
3455 cctx->tag_set = 0;
3456 cctx->iv_set = 0;
3457 cctx->len_set = 0;
3458 return 1;
3459
3460 case EVP_CTRL_COPY:
3461 {
3462 EVP_CIPHER_CTX *out = ptr;
3463 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3464 if (cctx->ccm.key) {
3465 if (cctx->ccm.key != &cctx->ks)
3466 return 0;
3467 cctx_out->ccm.key = &cctx_out->ks;
3468 }
3469 return 1;
3470 }
3471
3472 default:
3473 return -1;
3474
3475 }
3476 }
3477
3478 static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3479 const unsigned char *iv, int enc)
3480 {
3481 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3482
3483 if (iv == NULL && key == NULL)
3484 return 1;
3485
3486 if (key != NULL) {
3487 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3488
3489 if (keylen <= 0) {
3490 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3491 return 0;
3492 }
3493 do {
3494 #ifdef HWAES_CAPABLE
3495 if (HWAES_CAPABLE) {
3496 HWAES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3497
3498 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3499 &cctx->ks, (block128_f) HWAES_encrypt);
3500 cctx->str = NULL;
3501 cctx->key_set = 1;
3502 break;
3503 } else
3504 #endif
3505 #ifdef VPAES_CAPABLE
3506 if (VPAES_CAPABLE) {
3507 vpaes_set_encrypt_key(key, keylen, &cctx->ks.ks);
3508 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3509 &cctx->ks, (block128_f) vpaes_encrypt);
3510 cctx->str = NULL;
3511 cctx->key_set = 1;
3512 break;
3513 }
3514 #endif
3515 AES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3516 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3517 &cctx->ks, (block128_f) AES_encrypt);
3518 cctx->str = NULL;
3519 cctx->key_set = 1;
3520 } while (0);
3521 }
3522 if (iv != NULL) {
3523 memcpy(ctx->iv, iv, 15 - cctx->L);
3524 cctx->iv_set = 1;
3525 }
3526 return 1;
3527 }
3528
3529 static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3530 const unsigned char *in, size_t len)
3531 {
3532 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3533 CCM128_CONTEXT *ccm = &cctx->ccm;
3534 /* Encrypt/decrypt must be performed in place */
3535 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3536 return -1;
3537 /* If encrypting set explicit IV from sequence number (start of AAD) */
3538 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3539 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3540 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3541 /* Get rest of IV from explicit IV */
3542 memcpy(ctx->iv + EVP_CCM_TLS_FIXED_IV_LEN, in,
3543 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3544 /* Correct length value */
3545 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3546 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L,
3547 len))
3548 return -1;
3549 /* Use saved AAD */
3550 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx),
3551 cctx->tls_aad_len);
3552 /* Fix buffer to point to payload */
3553 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3554 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3555 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3556 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3557 cctx->str) :
3558 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3559 return -1;
3560 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3561 return -1;
3562 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3563 } else {
3564 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3565 cctx->str) :
3566 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3567 unsigned char tag[16];
3568 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3569 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3570 return len;
3571 }
3572 }
3573 OPENSSL_cleanse(out, len);
3574 return -1;
3575 }
3576 }
3577
3578 static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3579 const unsigned char *in, size_t len)
3580 {
3581 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3582 CCM128_CONTEXT *ccm = &cctx->ccm;
3583 /* If not set up, return error */
3584 if (!cctx->key_set)
3585 return -1;
3586
3587 if (cctx->tls_aad_len >= 0)
3588 return aes_ccm_tls_cipher(ctx, out, in, len);
3589
3590 /* EVP_*Final() doesn't return any data */
3591 if (in == NULL && out != NULL)
3592 return 0;
3593
3594 if (!cctx->iv_set)
3595 return -1;
3596
3597 if (!out) {
3598 if (!in) {
3599 if (CRYPTO_ccm128_setiv(ccm, ctx->iv,
3600 15 - cctx->L, len))
3601 return -1;
3602 cctx->len_set = 1;
3603 return len;
3604 }
3605 /* If have AAD need message length */
3606 if (!cctx->len_set && len)
3607 return -1;
3608 CRYPTO_ccm128_aad(ccm, in, len);
3609 return len;
3610 }
3611
3612 /* The tag must be set before actually decrypting data */
3613 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && !cctx->tag_set)
3614 return -1;
3615
3616 /* If not set length yet do it */
3617 if (!cctx->len_set) {
3618 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L, len))
3619 return -1;
3620 cctx->len_set = 1;
3621 }
3622 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3623 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3624 cctx->str) :
3625 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3626 return -1;
3627 cctx->tag_set = 1;
3628 return len;
3629 } else {
3630 int rv = -1;
3631 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3632 cctx->str) :
3633 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3634 unsigned char tag[16];
3635 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3636 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3637 cctx->M))
3638 rv = len;
3639 }
3640 }
3641 if (rv == -1)
3642 OPENSSL_cleanse(out, len);
3643 cctx->iv_set = 0;
3644 cctx->tag_set = 0;
3645 cctx->len_set = 0;
3646 return rv;
3647 }
3648 }
3649
3650 #define aes_ccm_cleanup NULL
3651
3652 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3653 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3654 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3655 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3656 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3657 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3658
3659 typedef struct {
3660 union {
3661 OSSL_UNION_ALIGN;
3662 AES_KEY ks;
3663 } ks;
3664     /* Points to the IV; NULL indicates that no IV has been set */
3665 unsigned char *iv;
3666 } EVP_AES_WRAP_CTX;
3667
3668 static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3669 const unsigned char *iv, int enc)
3670 {
3671 int len;
3672 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3673
3674 if (iv == NULL && key == NULL)
3675 return 1;
3676 if (key != NULL) {
3677 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3678
3679 if (keylen <= 0) {
3680 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3681 return 0;
3682 }
3683 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3684 AES_set_encrypt_key(key, keylen, &wctx->ks.ks);
3685 else
3686 AES_set_decrypt_key(key, keylen, &wctx->ks.ks);
3687 if (iv == NULL)
3688 wctx->iv = NULL;
3689 }
3690 if (iv != NULL) {
3691 if ((len = EVP_CIPHER_CTX_get_iv_length(ctx)) < 0)
3692 return 0;
3693 memcpy(ctx->iv, iv, len);
3694 wctx->iv = ctx->iv;
3695 }
3696 return 1;
3697 }
3698
3699 static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3700 const unsigned char *in, size_t inlen)
3701 {
3702 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3703 size_t rv;
3704     /* AES wrap with padding has an IV length of 4; without padding it is 8 */
3705 int pad = EVP_CIPHER_CTX_get_iv_length(ctx) == 4;
3706 /* No final operation so always return zero length */
3707 if (!in)
3708 return 0;
3709 /* Input length must always be non-zero */
3710 if (!inlen)
3711 return -1;
3712 /* If decrypting need at least 16 bytes and multiple of 8 */
3713 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3714 return -1;
3715 /* If not padding input must be multiple of 8 */
3716 if (!pad && inlen & 0x7)
3717 return -1;
3718 if (ossl_is_partially_overlapping(out, in, inlen)) {
3719 ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
3720 return 0;
3721 }
3722 if (!out) {
3723 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3724 /* If padding round up to multiple of 8 */
3725 if (pad)
3726 inlen = (inlen + 7) / 8 * 8;
3727 /* 8 byte prefix */
3728 return inlen + 8;
3729 } else {
3730 /*
3731              * If not padding, the output will be exactly 8 bytes smaller than
3732              * the input. If padding, it will be at least 8 bytes smaller, but
3733              * we don't know by how much.
3734 */
3735 return inlen - 8;
3736 }
3737 }
3738 if (pad) {
3739 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3740 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3741 out, in, inlen,
3742 (block128_f) AES_encrypt);
3743 else
3744 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3745 out, in, inlen,
3746 (block128_f) AES_decrypt);
3747 } else {
3748 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3749 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3750 out, in, inlen, (block128_f) AES_encrypt);
3751 else
3752 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3753 out, in, inlen, (block128_f) AES_decrypt);
3754 }
3755 return rv ? (int)rv : -1;
3756 }
3757
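/*-
 * Worked examples of the sizing logic above (RFC 3394 / RFC 5649 semantics):
 * wrapping a 16-byte key without padding yields 16 + 8 = 24 bytes; with
 * padding, a 20-byte input is first rounded up to 24 bytes and wraps to
 * 24 + 8 = 32 bytes. Unwrapping without padding returns exactly inlen - 8
 * bytes, and with padding at most inlen - 8.
 */
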
3758 #define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3759 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3760 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3761
3762 static const EVP_CIPHER aes_128_wrap = {
3763 NID_id_aes128_wrap,
3764 8, 16, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3765 aes_wrap_init_key, aes_wrap_cipher,
3766 NULL,
3767 sizeof(EVP_AES_WRAP_CTX),
3768 NULL, NULL, NULL, NULL
3769 };
3770
EVP_aes_128_wrap(void)3771 const EVP_CIPHER *EVP_aes_128_wrap(void)
3772 {
3773 return &aes_128_wrap;
3774 }
3775
3776 static const EVP_CIPHER aes_192_wrap = {
3777 NID_id_aes192_wrap,
3778 8, 24, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3779 aes_wrap_init_key, aes_wrap_cipher,
3780 NULL,
3781 sizeof(EVP_AES_WRAP_CTX),
3782 NULL, NULL, NULL, NULL
3783 };
3784
EVP_aes_192_wrap(void)3785 const EVP_CIPHER *EVP_aes_192_wrap(void)
3786 {
3787 return &aes_192_wrap;
3788 }
3789
3790 static const EVP_CIPHER aes_256_wrap = {
3791 NID_id_aes256_wrap,
3792 8, 32, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3793 aes_wrap_init_key, aes_wrap_cipher,
3794 NULL,
3795 sizeof(EVP_AES_WRAP_CTX),
3796 NULL, NULL, NULL, NULL
3797 };
3798
EVP_aes_256_wrap(void)3799 const EVP_CIPHER *EVP_aes_256_wrap(void)
3800 {
3801 return &aes_256_wrap;
3802 }
3803
3804 static const EVP_CIPHER aes_128_wrap_pad = {
3805 NID_id_aes128_wrap_pad,
3806 8, 16, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3807 aes_wrap_init_key, aes_wrap_cipher,
3808 NULL,
3809 sizeof(EVP_AES_WRAP_CTX),
3810 NULL, NULL, NULL, NULL
3811 };
3812
EVP_aes_128_wrap_pad(void)3813 const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3814 {
3815 return &aes_128_wrap_pad;
3816 }
3817
3818 static const EVP_CIPHER aes_192_wrap_pad = {
3819 NID_id_aes192_wrap_pad,
3820 8, 24, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3821 aes_wrap_init_key, aes_wrap_cipher,
3822 NULL,
3823 sizeof(EVP_AES_WRAP_CTX),
3824 NULL, NULL, NULL, NULL
3825 };
3826
EVP_aes_192_wrap_pad(void)3827 const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3828 {
3829 return &aes_192_wrap_pad;
3830 }
3831
3832 static const EVP_CIPHER aes_256_wrap_pad = {
3833 NID_id_aes256_wrap_pad,
3834 8, 32, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3835 aes_wrap_init_key, aes_wrap_cipher,
3836 NULL,
3837 sizeof(EVP_AES_WRAP_CTX),
3838 NULL, NULL, NULL, NULL
3839 };
3840
EVP_aes_256_wrap_pad(void)3841 const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3842 {
3843 return &aes_256_wrap_pad;
3844 }
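
/*-
 * Usage sketch (illustrative): unwrapping with the padded variant
 * (RFC 5649).  With padding, the plaintext length is only discovered
 * during the unwrap, so the output buffer should be sized for
 * inlen - 8 bytes and the actual length read back from *outlen; the
 * kek/wrapped parameters are hypothetical.  A NULL IV selects the
 * default 4-byte AIV.
 *
 *     int unwrap_key(const unsigned char kek[32],
 *                    const unsigned char *wrapped, int inlen,
 *                    unsigned char *out, int *outlen)
 *     {
 *         EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *         int ok = 0;
 *
 *         if (ctx == NULL)
 *             return 0;
 *         EVP_CIPHER_CTX_set_flags(ctx, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
 *         ok = EVP_DecryptInit_ex(ctx, EVP_aes_256_wrap_pad(), NULL,
 *                                 kek, NULL)
 *              && EVP_DecryptUpdate(ctx, out, outlen, wrapped, inlen);
 *         EVP_CIPHER_CTX_free(ctx);
 *         return ok;
 *     }
 */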

#ifndef OPENSSL_NO_OCB
static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
    EVP_CIPHER_CTX *newc;
    EVP_AES_OCB_CTX *new_octx;

    switch (type) {
    case EVP_CTRL_INIT:
        octx->key_set = 0;
        octx->iv_set = 0;
        octx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
        octx->iv = c->iv;
        octx->taglen = 16;
        octx->data_buf_len = 0;
        octx->aad_buf_len = 0;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = octx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        /* IV len must be 1 to 15 */
        if (arg <= 0 || arg > 15)
            return 0;

        octx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        if (ptr == NULL) {
            /* Tag len must be 0 to 16 */
            if (arg < 0 || arg > 16)
                return 0;

            octx->taglen = arg;
            return 1;
        }
        if (arg != octx->taglen || EVP_CIPHER_CTX_is_encrypting(c))
            return 0;
        memcpy(octx->tag, ptr, arg);
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        if (arg != octx->taglen || !EVP_CIPHER_CTX_is_encrypting(c))
            return 0;

        memcpy(ptr, octx->tag, arg);
        return 1;

    case EVP_CTRL_COPY:
        newc = (EVP_CIPHER_CTX *)ptr;
        new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
        return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
                                      &new_octx->ksenc.ks,
                                      &new_octx->ksdec.ks);

    default:
        return -1;
    }
}
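
/*-
 * Usage sketch (illustrative): the ctrls above are normally reached via
 * EVP_CIPHER_CTX_ctrl().  For example, selecting a 96-bit nonce and a
 * 96-bit tag before the (hypothetical) key and nonce are installed:
 *
 *     EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_128_ocb(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 12, NULL);
 *     EVP_EncryptInit_ex(ctx, NULL, NULL, key, nonce);
 *
 * Note that EVP_CTRL_AEAD_SET_TAG with a NULL ptr only sets the expected
 * tag length; a tag value (non-NULL ptr) is accepted only when
 * decrypting, as enforced above.
 */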

static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                            const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
# ifdef HWAES_CAPABLE
            if (HWAES_CAPABLE) {
                HWAES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
                HWAES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
                if (!CRYPTO_ocb128_init(&octx->ocb,
                                        &octx->ksenc.ks, &octx->ksdec.ks,
                                        (block128_f) HWAES_encrypt,
                                        (block128_f) HWAES_decrypt,
                                        enc ? HWAES_ocb_encrypt
                                            : HWAES_ocb_decrypt))
                    return 0;
                break;
            }
# endif
# ifdef VPAES_CAPABLE
            if (VPAES_CAPABLE) {
                vpaes_set_encrypt_key(key, keylen, &octx->ksenc.ks);
                vpaes_set_decrypt_key(key, keylen, &octx->ksdec.ks);
                if (!CRYPTO_ocb128_init(&octx->ocb,
                                        &octx->ksenc.ks, &octx->ksdec.ks,
                                        (block128_f) vpaes_encrypt,
                                        (block128_f) vpaes_decrypt,
                                        NULL))
                    return 0;
                break;
            }
# endif
            AES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            AES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) AES_encrypt,
                                    (block128_f) AES_decrypt,
                                    NULL))
                return 0;
        } while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is already set apply the IV now, otherwise save a copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}
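
/*
 * Note on ordering: the branches above let a caller supply the key and IV
 * in one EVP_CipherInit_ex() call or the IV before the key across two
 * calls; an IV that arrives before the key is stashed in octx->iv and
 * applied once the key schedule has been set up.
 */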

static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    unsigned char *buf;
    int *buf_len;
    int written_len = 0;
    size_t trailing_len;
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    /* If IV or Key not set then return error */
    if (!octx->iv_set)
        return -1;

    if (!octx->key_set)
        return -1;

    if (in != NULL) {
        /*
         * Need to ensure we are only passing full blocks to low-level OCB
         * routines. We do it here rather than in EVP_EncryptUpdate/
         * EVP_DecryptUpdate because we need to pass full blocks of AAD too
         * and those routines don't support that
         */

        /* Are we dealing with AAD or normal data here? */
        if (out == NULL) {
            buf = octx->aad_buf;
            buf_len = &(octx->aad_buf_len);
        } else {
            buf = octx->data_buf;
            buf_len = &(octx->data_buf_len);

            if (ossl_is_partially_overlapping(out + *buf_len, in, len)) {
                ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
                return 0;
            }
        }

        /*
         * If we've got a partially filled buffer from a previous call then
         * use that data first
         */
        if (*buf_len > 0) {
            unsigned int remaining;

            remaining = AES_BLOCK_SIZE - (*buf_len);
            if (remaining > len) {
                memcpy(buf + (*buf_len), in, len);
                *(buf_len) += len;
                return 0;
            }
            memcpy(buf + (*buf_len), in, remaining);

            /*
             * If we get here we've filled the buffer, so process it
             */
            len -= remaining;
            in += remaining;
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            }
            written_len = AES_BLOCK_SIZE;
            *buf_len = 0;
            if (out != NULL)
                out += AES_BLOCK_SIZE;
        }

        /* Do we have a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /*
         * If we've got some full blocks to handle, then process these first
         */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, in, out,
                                           len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, in, out,
                                           len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Handle any trailing partial block */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First of all empty the buffer of any partial block that we might
         * have been provided - both for data and AAD
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad(&octx->ocb, octx->aad_buf,
                                   octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting then verify */
        if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting then just get the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}
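
/*-
 * Usage sketch (illustrative): a complete OCB encryption through the EVP
 * interface.  AAD is supplied by calling EVP_EncryptUpdate() with a NULL
 * output pointer, which takes the out == NULL path above; key, nonce,
 * aad and msg are hypothetical buffers.
 *
 *     unsigned char ct[sizeof(msg)], tag[16];
 *     int len, ctlen;
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_128_ocb(), NULL, key, nonce);
 *     EVP_EncryptUpdate(ctx, NULL, &len, aad, sizeof(aad));
 *     EVP_EncryptUpdate(ctx, ct, &ctlen, msg, sizeof(msg));
 *     EVP_EncryptFinal_ex(ctx, ct + ctlen, &len);
 *     ctlen += len;
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 *
 * Since partial blocks are buffered internally, each Update call may
 * report more or fewer output bytes than it consumed; always accumulate
 * the returned lengths instead of assuming a 1:1 mapping per call.
 */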

static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
#endif                          /* OPENSSL_NO_OCB */