/*
 * Copyright 2011-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * All low level APIs are deprecated for public use, but still ok for internal
 * use where we're using them to implement the higher level EVP interface, as
 * is the case here.
 */
#include "internal/deprecated.h"

#include "cipher_aes_cbc_hmac_sha.h"

#if !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE)
int ossl_cipher_capable_aes_cbc_hmac_sha256(void)
{
    return 0;
}

const PROV_CIPHER_HW_AES_HMAC_SHA *ossl_prov_cipher_hw_aes_cbc_hmac_sha256(void)
{
    return NULL;
}
#else

# include <openssl/rand.h>
# include "crypto/evp.h"
# include "internal/constant_time.h"

void sha256_block_data_order(void *c, const void *p, size_t len);
int aesni_cbc_sha256_enc(const void *inp, void *out, size_t blocks,
                         const AES_KEY *key, unsigned char iv[16],
                         SHA256_CTX *ctx, const void *in0);

int ossl_cipher_capable_aes_cbc_hmac_sha256(void)
{
    return AESNI_CBC_HMAC_SHA_CAPABLE
           && aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL);
}
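
/*
 * Note that calling the stitched assembly routine above with all-NULL
 * arguments serves purely as a run-time capability probe: it returns
 * non-zero when the stitched AES-CBC/SHA-256 code path is usable on the
 * current CPU. The init_key handler below primes three SHA-256 contexts
 * that the rest of this file relies on: "head" (the hash state after the
 * HMAC ipad block), "tail" (the state after the opad block) and "md" (the
 * running record hash). Real key material is installed later by the
 * set_mac_key handler; here they are merely reset so that runs without a
 * MAC key (e.g. when benchmarking) still behave.
 */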

static int aesni_cbc_hmac_sha256_init_key(PROV_CIPHER_CTX *vctx,
                                          const unsigned char *key,
                                          size_t keylen)
{
    int ret;
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;

    if (ctx->base.enc)
        ret = aesni_set_encrypt_key(key, ctx->base.keylen * 8, &ctx->ks);
    else
        ret = aesni_set_decrypt_key(key, ctx->base.keylen * 8, &ctx->ks);

    SHA256_Init(&sctx->head);   /* handy when benchmarking */
    sctx->tail = sctx->head;
    sctx->md = sctx->head;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    vctx->removetlspad = 1;
    vctx->removetlsfixed = SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE;

    return ret < 0 ? 0 : 1;
}

static void sha256_update(SHA256_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA256_CBLOCK - res;
        if (len < res)
            res = len;
        SHA256_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA256_CBLOCK;
    len -= res;

    if (len) {
        sha256_block_data_order(c, ptr, len / SHA256_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA256_Update(c, ptr, res);
}
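
/*
 * Illustration of the bit-counter fix-up above (example values only):
 * hashing len = 0x30000000 bytes of whole blocks contributes 0x180000000
 * bits. "len >> 29" = 1 goes into the high word Nh, the low 32 bits of
 * "len <<= 3" (0x80000000) are added to the low word Nl, and the trailing
 * "if (c->Nl < (unsigned int)len)" test picks up the carry whenever that
 * 32-bit addition wraps around.
 */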

# if !defined(OPENSSL_NO_MULTIBLOCK)

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8], F[8], G[8], H[8];
} SHA256_MB_CTX;

typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void sha256_multi_block(SHA256_MB_CTX *, const HASH_DESC *, int);
void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_multi_block_encrypt(void *vctx,
                                       unsigned char *out,
                                       const unsigned char *inp,
                                       size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA256_MB_CTX *mctx;
    unsigned int frag, last, packlen, i;
    unsigned int x4 = 4 * n4x, minblocks, processed = 0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif

    /* ask for IVs in bulk */
    if (RAND_bytes_ex(ctx->base.libctx, (IVs = blocks[0].c), 16 * x4, 0) <= 0)
        return 0;

    /* align */
    mctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32));

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 32 + 16) & -16);
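
    /*
     * Worked example of the split above (illustrative only): with n4x = 1
     * the record is spread over x4 = 4 lanes; for inp_len = 8000, frag =
     * 8000 >> 2 = 2000 and last = 8000 + 2000 - (2000 << 2) = 2000, so
     * every lane carries 2000 bytes. Each lane's output slot is then
     * packlen = 5 + 16 + ((2000 + 32 + 16) & -16) = 2069 bytes: TLS
     * header, explicit IV, and payload|MAC rounded up to the AES block
     * size.
     */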

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#  if defined(BSWAP8)
    memcpy(blocks[0].c, sctx->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        mctx->A[i] = sctx->md.h[0];
        mctx->B[i] = sctx->md.h[1];
        mctx->C[i] = sctx->md.h[2];
        mctx->D[i] = sctx->md.h[3];
        mctx->E[i] = sctx->md.h[4];
        mctx->F[i] = sctx->md.h[5];
        mctx->G[i] = sctx->md.h[6];
        mctx->H[i] = sctx->md.h[7];

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)sctx->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)sctx->md.data)[8];
        blocks[i].c[9] = ((u8 *)sctx->md.data)[9];
        blocks[i].c[10] = ((u8 *)sctx->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha256_multi_block(mctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE 2048
#  if MAXCHUNKSIZE % 64
#   error "MAXCHUNKSIZE is not divisible by 64"
#  elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha256_multi_block(mctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef MAXCHUNKSIZE
    sha256_multi_block(mctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
                     off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }
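
    /*
     * Note on the tail blocks built above: SHA-256 padding appends 0x80,
     * zero fill, and a 64-bit big-endian bit count in the last 8 bytes of
     * the final block. A TLS record length fits comfortably in 32 bits,
     * so only the low word (bytes 60-63, or 124-127 for a two-block tail)
     * is written here; the high word is already zero thanks to the memset
     * of "blocks" above.
     */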

    /* hash input tails and finalize */
    sha256_multi_block(mctx, edges, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(mctx->A[i]);
        mctx->A[i] = sctx->tail.h[0];
        blocks[i].d[1] = BSWAP4(mctx->B[i]);
        mctx->B[i] = sctx->tail.h[1];
        blocks[i].d[2] = BSWAP4(mctx->C[i]);
        mctx->C[i] = sctx->tail.h[2];
        blocks[i].d[3] = BSWAP4(mctx->D[i]);
        mctx->D[i] = sctx->tail.h[3];
        blocks[i].d[4] = BSWAP4(mctx->E[i]);
        mctx->E[i] = sctx->tail.h[4];
        blocks[i].d[5] = BSWAP4(mctx->F[i]);
        mctx->F[i] = sctx->tail.h[5];
        blocks[i].d[6] = BSWAP4(mctx->G[i]);
        mctx->G[i] = sctx->tail.h[6];
        blocks[i].d[7] = BSWAP4(mctx->H[i]);
        mctx->H[i] = sctx->tail.h[7];
        blocks[i].c[32] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 32) * 8);
#  else
        PUTU32(blocks[i].c + 0, mctx->A[i]);
        mctx->A[i] = sctx->tail.h[0];
        PUTU32(blocks[i].c + 4, mctx->B[i]);
        mctx->B[i] = sctx->tail.h[1];
        PUTU32(blocks[i].c + 8, mctx->C[i]);
        mctx->C[i] = sctx->tail.h[2];
        PUTU32(blocks[i].c + 12, mctx->D[i]);
        mctx->D[i] = sctx->tail.h[3];
        PUTU32(blocks[i].c + 16, mctx->E[i]);
        mctx->E[i] = sctx->tail.h[4];
        PUTU32(blocks[i].c + 20, mctx->F[i]);
        mctx->F[i] = sctx->tail.h[5];
        PUTU32(blocks[i].c + 24, mctx->G[i]);
        mctx->G[i] = sctx->tail.h[6];
        PUTU32(blocks[i].c + 28, mctx->H[i]);
        mctx->H[i] = sctx->tail.h[7];
        blocks[i].c[32] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 32) * 8);
#  endif /* BSWAP */
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* finalize MACs */
    sha256_multi_block(mctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, mctx->A[i]);
        PUTU32(out + 4, mctx->B[i]);
        PUTU32(out + 8, mctx->C[i]);
        PUTU32(out + 12, mctx->D[i]);
        PUTU32(out + 16, mctx->E[i]);
        PUTU32(out + 20, mctx->F[i]);
        PUTU32(out + 24, mctx->G[i]);
        PUTU32(out + 28, mctx->H[i]);
        out += 32;
        len += 32;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)sctx->md.data)[8];
        out0[1] = ((u8 *)sctx->md.data)[9];
        out0[2] = ((u8 *)sctx->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }
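
    /*
     * Each lane has now been assembled into a complete TLS record; the
     * lanes sit packlen bytes apart in the output buffer and only the
     * final CBC pass below is still missing:
     *
     *   | 5-byte header | 16-byte explicit IV | payload | 32-byte MAC | pad |
     */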

    aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(mctx, sizeof(*mctx));

    ctx->multiblock_encrypt_len = ret;
    return ret;
}
# endif /* !OPENSSL_NO_MULTIBLOCK */

static int aesni_cbc_hmac_sha256_cipher(PROV_CIPHER_CTX *vctx,
                                        unsigned char *out,
                                        const unsigned char *in, size_t len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int l;
    size_t plen = ctx->payload_length;
    size_t iv = 0;              /* explicit IV in TLS 1.1 and later */
    size_t aes_off = 0, blocks;
    size_t sha_off = SHA256_CBLOCK - sctx->md.num;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->base.enc) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA256_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (ctx->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

        /*
         * The assembly stitch handles AVX-capable processors, but its
         * performance is not optimal on AMD Jaguar, ~40% worse, for
         * unknown reasons. Incidentally, the processor in question
         * supports AVX, but not the AMD-specific XOP extension, which
         * could be used to identify it and avoid the stitch invocation.
         * So after establishing that the current CPU supports AVX, we
         * also check that it is either an XOP-capable Bulldozer-based
         * or a GenuineIntel one. SHAEXT-capable processors go ahead
         * regardless...
         */
        if (((OPENSSL_ia32cap_P[2] & (1 << 29)) ||         /* SHAEXT? */
             ((OPENSSL_ia32cap_P[1] & (1 << (60 - 32))) && /* AVX? */
              ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32)))   /* XOP? */
               | (OPENSSL_ia32cap_P[0] & (1 << 30))))) &&  /* "Intel CPU"? */
            plen > (sha_off + iv) &&
            (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
            sha256_update(&sctx->md, in + iv, sha_off);

            (void)aesni_cbc_sha256_enc(in, out, blocks, &ctx->ks,
                                       ctx->base.iv,
                                       &sctx->md, in + iv + sha_off);
            blocks *= SHA256_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            sctx->md.Nh += blocks >> 29;
            sctx->md.Nl += blocks <<= 3;
            if (sctx->md.Nl < (unsigned int)blocks)
                sctx->md.Nh++;
        } else {
            sha_off = 0;
        }
        sha_off += iv;
        sha256_update(&sctx->md, in + sha_off, plen - sha_off);

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA256_Final(out + plen, &sctx->md);
            sctx->md = sctx->tail;
            sha256_update(&sctx->md, out + plen, SHA256_DIGEST_LENGTH);
            SHA256_Final(out + plen, &sctx->md);

            /* pad the payload|hmac */
            plen += SHA256_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
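
            /*
             * TLS CBC padding illustrated (example values only): with a
             * 13-byte payload, plen = 13 + 32 = 45 after the MAC is
             * appended and len = 48 (rounded up to the AES block size),
             * so l = 48 - 45 - 1 = 2 and the last three bytes all carry
             * the value 2: two pad bytes plus the pad-length byte itself.
             */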
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[64 + SHA256_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64));

        /* decrypt HMAC|padding at once */
        aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);

        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA256_CBLOCK];
            } *data = (void *)sctx->md.data;

            if ((ctx->aux.tls_aad[plen - 4] << 8 | ctx->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION)
                iv = AES_BLOCK_SIZE;

            if (len < (iv + SHA256_DIGEST_LENGTH + 1))
                return 0;

            /* omit explicit iv */
            out += iv;
            len -= iv;

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA256_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);
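
            /*
             * The maxpad clamp above computes min(len - 33, 255) without a
             * branch (example values only): for len - 33 = 300, 255 - 300
             * wraps to 0xFFFFFFD3 and shifting right by 24 yields 0xFF, so
             * maxpad is forced to 255; for len - 33 = 100 the shift yields
             * 0 and maxpad stays 100. A TLS pad can never exceed 255, so
             * larger values only widen the constant-time sweep below.
             */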

            inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1);

            ctx->aux.tls_aad[plen - 2] = inp_len >> 8;
            ctx->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            sctx->md = sctx->head;
            sha256_update(&sctx->md, ctx->aux.tls_aad, plen);

            /* code with lucky-13 fix */
            len -= SHA256_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA256_CBLOCK)) {
                j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK);
                j += SHA256_CBLOCK - sctx->md.num;
                sha256_update(&sctx->md, out, j);
                out += j;
                len -= j;
                inp_len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = sctx->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
# else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
# endif /* BSWAP */

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            pmac->u[5] = 0;
            pmac->u[6] = 0;
            pmac->u[7] = 0;

            for (res = sctx->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA256_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h[0] & mask;
                pmac->u[1] |= sctx->md.h[1] & mask;
                pmac->u[2] |= sctx->md.h[2] & mask;
                pmac->u[3] |= sctx->md.h[3] & mask;
                pmac->u[4] |= sctx->md.h[4] & mask;
                pmac->u[5] |= sctx->md.h[5] & mask;
                pmac->u[6] |= sctx->md.h[6] & mask;
                pmac->u[7] |= sctx->md.h[7] & mask;
                res = 0;
            }

            for (i = res; i < SHA256_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA256_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h[0] & mask;
                pmac->u[1] |= sctx->md.h[1] & mask;
                pmac->u[2] |= sctx->md.h[2] & mask;
                pmac->u[3] |= sctx->md.h[3] & mask;
                pmac->u[4] |= sctx->md.h[4] & mask;
                pmac->u[5] |= sctx->md.h[5] & mask;
                pmac->u[6] |= sctx->md.h[6] & mask;
                pmac->u[7] |= sctx->md.h[7] & mask;

                memset(data, 0, SHA256_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha256_block_data_order(&sctx->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= sctx->md.h[0] & mask;
            pmac->u[1] |= sctx->md.h[1] & mask;
            pmac->u[2] |= sctx->md.h[2] & mask;
            pmac->u[3] |= sctx->md.h[3] & mask;
            pmac->u[4] |= sctx->md.h[4] & mask;
            pmac->u[5] |= sctx->md.h[5] & mask;
            pmac->u[6] |= sctx->md.h[6] & mask;
            pmac->u[7] |= sctx->md.h[7] & mask;

# ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
            pmac->u[5] = BSWAP4(pmac->u[5]);
            pmac->u[6] = BSWAP4(pmac->u[6]);
            pmac->u[7] = BSWAP4(pmac->u[7]);
# else
            for (i = 0; i < 8; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
# endif /* BSWAP */
            len += SHA256_DIGEST_LENGTH;
            sctx->md = sctx->tail;
            sha256_update(&sctx->md, pmac->c, SHA256_DIGEST_LENGTH);
            SHA256_Final(pmac->c, &sctx->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
            /* code containing lucky-13 fix */
            {
                unsigned char *p =
                    out + len - 1 - maxpad - SHA256_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                for (res = 0, i = 0, j = 0;
                     j < maxpad + SHA256_DIGEST_LENGTH;
                     j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA256_DIGEST_LENGTH)) >>
                        (sizeof(int) * 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
            return ret;
        } else {
            sha256_update(&sctx->md, out, len);
        }
    }

    return 1;
}

/* EVP_CTRL_AEAD_SET_MAC_KEY */
static void aesni_cbc_hmac_sha256_set_mac_key(void *vctx,
                                              const unsigned char *mackey,
                                              size_t len)
{
    PROV_AES_HMAC_SHA256_CTX *ctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int i;
    unsigned char hmac_key[64];

    memset(hmac_key, 0, sizeof(hmac_key));

    if (len > sizeof(hmac_key)) {
        SHA256_Init(&ctx->head);
        sha256_update(&ctx->head, mackey, len);
        SHA256_Final(hmac_key, &ctx->head);
    } else {
        memcpy(hmac_key, mackey, len);
    }

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36;        /* ipad */
    SHA256_Init(&ctx->head);
    sha256_update(&ctx->head, hmac_key, sizeof(hmac_key));

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
    SHA256_Init(&ctx->tail);
    sha256_update(&ctx->tail, hmac_key, sizeof(hmac_key));

    OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
}
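
/*
 * What set_mac_key just precomputed, in HMAC terms: "head" holds the
 * SHA-256 state after absorbing (key XOR ipad) and "tail" the state after
 * (key XOR opad), so each record later costs only the data hash plus one
 * short tail pass instead of two full HMAC passes. Note that the second
 * loop XORs with 0x36 ^ 0x5c: applied on top of the ipad-masked buffer,
 * this flips it straight to the opad mask.
 */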

/* EVP_CTRL_AEAD_TLS1_AAD */
static int aesni_cbc_hmac_sha256_set_tls1_aad(void *vctx,
                                              unsigned char *aad_rec,
                                              int aad_len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned char *p = aad_rec;
    unsigned int len;

    if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
        return -1;

    len = p[aad_len - 2] << 8 | p[aad_len - 1];

    if (ctx->base.enc) {
        ctx->payload_length = len;
        if ((ctx->aux.tls_ver =
             p[aad_len - 4] << 8 | p[aad_len - 3]) >= TLS1_1_VERSION) {
            if (len < AES_BLOCK_SIZE)
                return 0;
            len -= AES_BLOCK_SIZE;
            p[aad_len - 2] = len >> 8;
            p[aad_len - 1] = len;
        }
        sctx->md = sctx->head;
        sha256_update(&sctx->md, p, aad_len);
        ctx->tls_aad_pad = (int)(((len + SHA256_DIGEST_LENGTH +
                                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                                 - len);
        return 1;
    } else {
        memcpy(ctx->aux.tls_aad, p, aad_len);
        ctx->payload_length = aad_len;
        ctx->tls_aad_pad = SHA256_DIGEST_LENGTH;
        return 1;
    }
}

# if !defined(OPENSSL_NO_MULTIBLOCK)
/* EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE */
static int aesni_cbc_hmac_sha256_tls1_multiblock_max_bufsize(void *vctx)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;

    OPENSSL_assert(ctx->multiblock_max_send_fragment != 0);
    return (int)(5 + 16
                 + (((int)ctx->multiblock_max_send_fragment + 32 + 16) & -16));
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_AAD */
static int aesni_cbc_hmac_sha256_tls1_multiblock_aad(
    void *vctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int n4x = 1, x4;
    unsigned int frag, last, packlen, inp_len;

    inp_len = param->inp[11] << 8 | param->inp[12];

    if (ctx->base.enc) {
        if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
            return -1;

        if (inp_len) {
            if (inp_len < 4096)
                return 0;       /* too short */

            if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                n4x = 2;        /* AVX2 */
        } else if ((n4x = param->interleave / 4) && n4x <= 2)
            inp_len = param->len;
        else
            return -1;

        sctx->md = sctx->head;
        sha256_update(&sctx->md, param->inp, 13);

        x4 = 4 * n4x;
        n4x += 1;

        frag = inp_len >> n4x;
        last = inp_len + frag - (frag << n4x);
        if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
            frag++;
            last -= x4 - 1;
        }

        packlen = 5 + 16 + ((frag + 32 + 16) & -16);
        packlen = (packlen << n4x) - packlen;
        packlen += 5 + 16 + ((last + 32 + 16) & -16);

        param->interleave = x4;
        /* Store the computed values; they are returned later via get_params */
        ctx->multiblock_interleave = x4;
        ctx->multiblock_aad_packlen = packlen;
        return 1;
    }
    return -1;                  /* not yet */
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT */
static int aesni_cbc_hmac_sha256_tls1_multiblock_encrypt(
    void *ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    return (int)tls1_multi_block_encrypt(ctx, param->out,
                                         param->inp, param->len,
                                         param->interleave / 4);
}
# endif /* !OPENSSL_NO_MULTIBLOCK */

static const PROV_CIPHER_HW_AES_HMAC_SHA cipher_hw_aes_hmac_sha256 = {
    {
        aesni_cbc_hmac_sha256_init_key,
        aesni_cbc_hmac_sha256_cipher
    },
    aesni_cbc_hmac_sha256_set_mac_key,
    aesni_cbc_hmac_sha256_set_tls1_aad,
# if !defined(OPENSSL_NO_MULTIBLOCK)
    aesni_cbc_hmac_sha256_tls1_multiblock_max_bufsize,
    aesni_cbc_hmac_sha256_tls1_multiblock_aad,
    aesni_cbc_hmac_sha256_tls1_multiblock_encrypt
# endif
};

const PROV_CIPHER_HW_AES_HMAC_SHA *ossl_prov_cipher_hw_aes_cbc_hmac_sha256(void)
{
    return &cipher_hw_aes_hmac_sha256;
}

#endif /* !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE) */