sha1.c (673007c6) | sha1.c (734b6a94) |
---|---|
1/* 2 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6#pragma ident "%Z%%M% %I% %E% SMI" 7 8/* --- 27 unchanged lines hidden (view full) --- 36 37#include <sys/types.h> 38#include <sys/param.h> 39#include <sys/systm.h> 40#include <sys/sysmacros.h> 41#include <sys/sha1.h> 42#include <sys/sha1_consts.h> 43 | 1/* 2 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6#pragma ident "%Z%%M% %I% %E% SMI" 7 8/* --- 27 unchanged lines hidden (view full) --- 36 37#include <sys/types.h> 38#include <sys/param.h> 39#include <sys/systm.h> 40#include <sys/sysmacros.h> 41#include <sys/sha1.h> 42#include <sys/sha1_consts.h> 43 |
44#ifdef _KERNEL 45 46#include <sys/modctl.h> 47#include <sys/cmn_err.h> 48#include <sys/note.h> 49#include <sys/crypto/common.h> 50#include <sys/crypto/spi.h> 51#include <sys/strsun.h> 52 53/* 54 * The sha1 module is created with two modlinkages: 55 * - a modlmisc that allows consumers to directly call the entry points 56 * SHA1Init, SHA1Update, and SHA1Final. 57 * - a modlcrypto that allows the module to register with the Kernel 58 * Cryptographic Framework (KCF) as a software provider for the SHA1 59 * mechanisms. 60 */ 61 62#endif /* _KERNEL */ | |
63#ifndef _KERNEL 64#include <strings.h> 65#include <stdlib.h> 66#include <errno.h> 67#include <sys/systeminfo.h> | 44#ifndef _KERNEL 45#include <strings.h> 46#include <stdlib.h> 47#include <errno.h> 48#include <sys/systeminfo.h> |
68#endif /* !_KERNEL */ | 49#endif /* !_KERNEL */ |
69 | 50 |
70static void Encode(uint8_t *, uint32_t *, size_t); 71static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, 72 SHA1_CTX *, const uint8_t *); | 51static void Encode(uint8_t *, const uint32_t *, size_t); |
73 | 52 |
74static uint8_t PADDING[64] = { 0x80, /* all zeros */ }; | 53#if defined(__sparc) |
75 | 54 |
76/* 77 * F, G, and H are the basic SHA1 functions. 78 */ 79#define F(b, c, d) (((b) & (c)) | ((~b) & (d))) 80#define G(b, c, d) ((b) ^ (c) ^ (d)) 81#define H(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d))) | 55#define SHA1_TRANSFORM(ctx, in) \ 56 SHA1Transform((ctx)->state[0], (ctx)->state[1], (ctx)->state[2], \ 57 (ctx)->state[3], (ctx)->state[4], (ctx), (in)) |
82 | 58 |
83/* 84 * ROTATE_LEFT rotates x left n bits. 85 */ 86#define ROTATE_LEFT(x, n) \ 87 (((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n)))) | 59static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, 60 SHA1_CTX *, const uint8_t *); |
88 | 61 |
89#ifdef _KERNEL | 62#else |
90 | 63 |
91static struct modlmisc modlmisc = { 92 &mod_miscops, 93 "SHA1 Message-Digest Algorithm" 94}; | 64#define SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in)) |
95 | 65 |
96static struct modlcrypto modlcrypto = { 97 &mod_cryptoops, 98 "SHA1 Kernel SW Provider %I%" 99}; | 66static void SHA1Transform(SHA1_CTX *, const uint8_t *); |
100 | 67 |
101static struct modlinkage modlinkage = { 102 MODREV_1, &modlmisc, &modlcrypto, NULL 103}; | 68#endif |
104 | 69 |
105/* 106 * CSPI information (entry points, provider info, etc.) 107 */ | |
108 | 70 |
109typedef enum sha1_mech_type { 110 SHA1_MECH_INFO_TYPE, /* SUN_CKM_SHA1 */ 111 SHA1_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA1_HMAC */ 112 SHA1_HMAC_GEN_MECH_INFO_TYPE /* SUN_CKM_SHA1_HMAC_GENERAL */ 113} sha1_mech_type_t; | 71static uint8_t PADDING[64] = { 0x80, /* all zeros */ }; |
114 | 72 |
115#define SHA1_DIGEST_LENGTH 20 /* SHA1 digest length in bytes */ 116#define SHA1_HMAC_BLOCK_SIZE 64 /* SHA1-HMAC block size */ 117#define SHA1_HMAC_MIN_KEY_LEN 8 /* SHA1-HMAC min key length in bits */ 118#define SHA1_HMAC_MAX_KEY_LEN INT_MAX /* SHA1-HMAC max key length in bits */ 119#define SHA1_HMAC_INTS_PER_BLOCK (SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t)) 120 | |
121/* | 73/* |
122 * Context for SHA1 mechanism. | 74 * F, G, and H are the basic SHA1 functions. |
123 */ | 75 */ |
124typedef struct sha1_ctx { 125 sha1_mech_type_t sc_mech_type; /* type of context */ 126 SHA1_CTX sc_sha1_ctx; /* SHA1 context */ 127} sha1_ctx_t; | 76#define F(b, c, d) (((b) & (c)) | ((~b) & (d))) 77#define G(b, c, d) ((b) ^ (c) ^ (d)) 78#define H(b, c, d) (((b) & (c)) | (((b)|(c)) & (d))) |
128 129/* | 79 80/* |
130 * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms. | 81 * ROTATE_LEFT rotates x left n bits. |
131 */ | 82 */ |
132typedef struct sha1_hmac_ctx { 133 sha1_mech_type_t hc_mech_type; /* type of context */ 134 uint32_t hc_digest_len; /* digest len in bytes */ 135 SHA1_CTX hc_icontext; /* inner SHA1 context */ 136 SHA1_CTX hc_ocontext; /* outer SHA1 context */ 137} sha1_hmac_ctx_t; | |
138 | 83 |
139/* 140 * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed 141 * by KCF to one of the entry points. 142 */ | 84#if defined(__GNUC__) && defined(_LP64) 85static __inline__ uint64_t 86ROTATE_LEFT(uint64_t value, uint32_t n) 87{ 88 uint32_t t32; |
143 | 89 |
144#define PROV_SHA1_CTX(ctx) ((sha1_ctx_t *)(ctx)->cc_provider_private) 145#define PROV_SHA1_HMAC_CTX(ctx) ((sha1_hmac_ctx_t *)(ctx)->cc_provider_private) 146 147/* to extract the digest length passed as mechanism parameter */ 148#define PROV_SHA1_GET_DIGEST_LEN(m, len) { \ 149 if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \ 150 (len) = (uint32_t)*((ulong_t *)mechanism->cm_param); \ 151 else { \ 152 ulong_t tmp_ulong; \ 153 bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \ 154 (len) = (uint32_t)tmp_ulong; \ 155 } \ | 90 t32 = (uint32_t)value; 91 return ((t32 << n) | (t32 >> (32 - n))); |
156} 157 | 92} 93 |
158#define PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) { \ 159 SHA1Init(ctx); \ 160 SHA1Update(ctx, key, len); \ 161 SHA1Final(digest, ctx); \ 162} | 94#else |
163 | 95 |
164/* 165 * Mechanism info structure passed to KCF during registration. 166 */ 167static crypto_mech_info_t sha1_mech_info_tab[] = { 168 /* SHA1 */ 169 {SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE, 170 CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC, 171 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS}, 172 /* SHA1-HMAC */ 173 {SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE, 174 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC, 175 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN, 176 CRYPTO_KEYSIZE_UNIT_IN_BITS}, 177 /* SHA1-HMAC GENERAL */ 178 {SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE, 179 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC, 180 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN, 181 CRYPTO_KEYSIZE_UNIT_IN_BITS} 182}; | 96#define ROTATE_LEFT(x, n) \ 97 (((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n)))) |
183 | 98 |
184static void sha1_provider_status(crypto_provider_handle_t, uint_t *); | 99#endif |
185 | 100 |
186static crypto_control_ops_t sha1_control_ops = { 187 sha1_provider_status 188}; | 101#if defined(__GNUC__) && (defined(__i386) || defined(__amd64)) |
189 | 102 |
190static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *, 191 crypto_req_handle_t); 192static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 193 crypto_req_handle_t); 194static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *, 195 crypto_req_handle_t); 196static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *, 197 crypto_req_handle_t); 198static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t, 199 crypto_mechanism_t *, crypto_data_t *, crypto_data_t *, 200 crypto_req_handle_t); | 103#define HAVE_BSWAP |
201 | 104 |
202static crypto_digest_ops_t sha1_digest_ops = { 203 sha1_digest_init, 204 sha1_digest, 205 sha1_digest_update, 206 NULL, 207 sha1_digest_final, 208 sha1_digest_atomic 209}; 210 211static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, 212 crypto_spi_ctx_template_t, crypto_req_handle_t); 213static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *, 214 crypto_req_handle_t); 215static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t); 216static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t, 217 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, 218 crypto_spi_ctx_template_t, crypto_req_handle_t); 219static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, 220 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, 221 crypto_spi_ctx_template_t, crypto_req_handle_t); 222 223static crypto_mac_ops_t sha1_mac_ops = { 224 sha1_mac_init, 225 NULL, 226 sha1_mac_update, 227 sha1_mac_final, 228 sha1_mac_atomic, 229 sha1_mac_verify_atomic 230}; 231 232static int sha1_create_ctx_template(crypto_provider_handle_t, 233 crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, 234 size_t *, crypto_req_handle_t); 235static int sha1_free_context(crypto_ctx_t *); 236 237static crypto_ctx_ops_t sha1_ctx_ops = { 238 sha1_create_ctx_template, 239 sha1_free_context 240}; 241 242static crypto_ops_t sha1_crypto_ops = { 243 &sha1_control_ops, 244 &sha1_digest_ops, 245 NULL, 246 &sha1_mac_ops, 247 NULL, 248 NULL, 249 NULL, 250 NULL, 251 NULL, 252 NULL, 253 NULL, 254 NULL, 255 NULL, 256 &sha1_ctx_ops 257}; 258 259static crypto_provider_info_t sha1_prov_info = { 260 CRYPTO_SPI_VERSION_1, 261 "SHA1 Software Provider", 262 CRYPTO_SW_PROVIDER, 263 {&modlinkage}, 264 NULL, 265 &sha1_crypto_ops, 266 sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t), 267 sha1_mech_info_tab 268}; 269 270static crypto_kcf_provider_handle_t sha1_prov_handle = NULL; 271 272int 
273_init() | 105extern __inline__ uint32_t bswap(uint32_t value) |
274{ | 106{ |
275 int ret; 276 277 if ((ret = mod_install(&modlinkage)) != 0) 278 return (ret); 279 280 /* 281 * Register with KCF. If the registration fails, log an 282 * error but do not uninstall the module, since the functionality 283 * provided by misc/sha1 should still be available. 284 */ 285 if ((ret = crypto_register_provider(&sha1_prov_info, 286 &sha1_prov_handle)) != CRYPTO_SUCCESS) 287 cmn_err(CE_WARN, "sha1 _init: " 288 "crypto_register_provider() failed (0x%x)", ret); 289 290 return (0); | 107 __asm__("bswap %0" : "+r" (value)); 108 return (value); |
291} 292 | 109} 110 |
293int 294_info(struct modinfo *modinfop) 295{ 296 return (mod_info(&modlinkage, modinfop)); 297} | 111#endif |
298 | 112 |
299#endif /* _KERNEL */ 300 | |
301/* 302 * SHA1Init() 303 * 304 * purpose: initializes the sha1 context and begins and sha1 digest operation 305 * input: SHA1_CTX * : the context to initializes. 306 * output: void 307 */ 308 --- 10 unchanged lines hidden (view full) --- 319 ctx->state[0] = 0x67452301U; 320 ctx->state[1] = 0xefcdab89U; 321 ctx->state[2] = 0x98badcfeU; 322 ctx->state[3] = 0x10325476U; 323 ctx->state[4] = 0xc3d2e1f0U; 324} 325 326#ifdef VIS_SHA1 | 113/* 114 * SHA1Init() 115 * 116 * purpose: initializes the sha1 context and begins and sha1 digest operation 117 * input: SHA1_CTX * : the context to initializes. 118 * output: void 119 */ 120 --- 10 unchanged lines hidden (view full) --- 131 ctx->state[0] = 0x67452301U; 132 ctx->state[1] = 0xefcdab89U; 133 ctx->state[2] = 0x98badcfeU; 134 ctx->state[3] = 0x10325476U; 135 ctx->state[4] = 0xc3d2e1f0U; 136} 137 138#ifdef VIS_SHA1 |
327 328 | |
329#ifdef _KERNEL 330 331#include <sys/regset.h> 332#include <sys/vis.h> 333#include <sys/fpu/fpusystm.h> 334 335/* the alignment for block stores to save fp registers */ 336#define VIS_ALIGN (64) 337 338extern int sha1_savefp(kfpu_t *, int); 339extern void sha1_restorefp(kfpu_t *); 340 341uint32_t vis_sha1_svfp_threshold = 128; 342 | 139#ifdef _KERNEL 140 141#include <sys/regset.h> 142#include <sys/vis.h> 143#include <sys/fpu/fpusystm.h> 144 145/* the alignment for block stores to save fp registers */ 146#define VIS_ALIGN (64) 147 148extern int sha1_savefp(kfpu_t *, int); 149extern void sha1_restorefp(kfpu_t *); 150 151uint32_t vis_sha1_svfp_threshold = 128; 152 |
343#else /* !_KERNEL */ 344 345static boolean_t checked_vis = B_FALSE; 346static int usevis = 0; 347 348static int 349havevis() 350{ 351 char *buf = NULL; 352 char *isa_token; 353 char *lasts; 354 int ret = 0; 355 size_t bufsize = 255; /* UltraSPARC III needs 115 chars */ 356 int v9_isa_token, vis_isa_token, isa_token_num; 357 358 if (checked_vis) { 359 return (usevis); 360 } 361 362 if ((buf = malloc(bufsize)) == NULL) { 363 return (0); 364 } 365 366 if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) { 367 free(buf); 368 return (0); 369 } else if (ret > bufsize) { 370 /* We lost some because our buffer was too small */ 371 if ((buf = realloc(buf, bufsize = ret)) == NULL) { 372 return (0); 373 } 374 if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) { 375 free(buf); 376 return (0); 377 } 378 } 379 380 /* 381 * Check the relative posistions of sparcv9 & sparcv9+vis 382 * because they are listed in (best) performance order. 383 * For example: The Niagara chip reports it has VIS but the 384 * SHA1 code runs faster without this optimisation. 385 */ 386 isa_token = strtok_r(buf, " ", &lasts); 387 v9_isa_token = vis_isa_token = -1; 388 isa_token_num = 0; 389 do { 390 if (strcmp(isa_token, "sparcv9") == 0) { 391 v9_isa_token = isa_token_num; 392 } else if (strcmp(isa_token, "sparcv9+vis") == 0) { 393 vis_isa_token = isa_token_num; 394 } 395 isa_token_num++; 396 } while (isa_token = strtok_r(NULL, " ", &lasts)); 397 398 if (vis_isa_token != -1 && vis_isa_token < v9_isa_token) 399 usevis = 1; 400 free(buf); 401 402 checked_vis = B_TRUE; 403 return (usevis); 404} 405 | |
406#endif /* _KERNEL */ 407 408/* 409 * VIS SHA-1 consts. 410 */ 411static uint64_t VIS[] = { 412 0x8000000080000000ULL, 413 0x0002000200020002ULL, 414 0x5a8279996ed9eba1ULL, 415 0x8f1bbcdcca62c1d6ULL, 416 0x012389ab456789abULL}; 417 | 153#endif /* _KERNEL */ 154 155/* 156 * VIS SHA-1 consts. 157 */ 158static uint64_t VIS[] = { 159 0x8000000080000000ULL, 160 0x0002000200020002ULL, 161 0x5a8279996ed9eba1ULL, 162 0x8f1bbcdcca62c1d6ULL, 163 0x012389ab456789abULL}; 164 |
418extern void SHA1TransformVIS(uint64_t *, uint64_t *, uint32_t *, uint64_t *); | 165extern void SHA1TransformVIS(uint64_t *, uint32_t *, uint32_t *, uint64_t *); |
419 420 421/* 422 * SHA1Update() 423 * 424 * purpose: continues an sha1 digest operation, using the message block 425 * to update the context. 426 * input: SHA1_CTX * : the context to update | 166 167 168/* 169 * SHA1Update() 170 * 171 * purpose: continues an sha1 digest operation, using the message block 172 * to update the context. 173 * input: SHA1_CTX * : the context to update |
427 * uint8_t * : the message block 428 * uint32_t : the length of the message block in bytes | 174 * void * : the message block 175 * size_t : the length of the message block in bytes |
429 * output: void 430 */ 431 432void | 176 * output: void 177 */ 178 179void |
433SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len) | 180SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len) |
434{ 435 uint32_t i, buf_index, buf_len; 436 uint64_t X0[40], input64[8]; | 181{ 182 uint32_t i, buf_index, buf_len; 183 uint64_t X0[40], input64[8]; |
184 const uint8_t *input = inptr; |
|
437#ifdef _KERNEL 438 int usevis = 0; | 185#ifdef _KERNEL 186 int usevis = 0; |
187#else 188 int usevis = 1; |
|
439#endif /* _KERNEL */ 440 441 /* check for noop */ 442 if (input_len == 0) 443 return; 444 445 /* compute number of bytes mod 64 */ 446 buf_index = (ctx->count[1] >> 3) & 0x3F; --- 5 unchanged lines hidden (view full) --- 452 ctx->count[0] += (input_len >> 29); 453 454 buf_len = 64 - buf_index; 455 456 /* transform as many times as possible */ 457 i = 0; 458 if (input_len >= buf_len) { 459#ifdef _KERNEL | 189#endif /* _KERNEL */ 190 191 /* check for noop */ 192 if (input_len == 0) 193 return; 194 195 /* compute number of bytes mod 64 */ 196 buf_index = (ctx->count[1] >> 3) & 0x3F; --- 5 unchanged lines hidden (view full) --- 202 ctx->count[0] += (input_len >> 29); 203 204 buf_len = 64 - buf_index; 205 206 /* transform as many times as possible */ 207 i = 0; 208 if (input_len >= buf_len) { 209#ifdef _KERNEL |
460 uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN]; | |
461 kfpu_t *fpu; | 210 kfpu_t *fpu; |
211 if (fpu_exists) { 212 uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN]; 213 uint32_t len = (input_len + buf_index) & ~0x3f; 214 int svfp_ok; |
|
462 | 215 |
463 uint32_t len = (input_len + buf_index) & ~0x3f; 464 int svfp_ok; 465 466 fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64); 467 svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0); 468 usevis = fpu_exists && sha1_savefp(fpu, svfp_ok); 469#else 470 if (!checked_vis) 471 usevis = havevis(); | 216 fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64); 217 svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0); 218 usevis = fpu_exists && sha1_savefp(fpu, svfp_ok); 219 } else { 220 usevis = 0; 221 } |
472#endif /* _KERNEL */ 473 474 /* 475 * general optimization: 476 * 477 * only do initial bcopy() and SHA1Transform() if 478 * buf_index != 0. if buf_index == 0, we're just 479 * wasting our time doing the bcopy() since there 480 * wasn't any data left over from a previous call to 481 * SHA1Update(). 482 */ 483 484 if (buf_index) { 485 bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len); 486 if (usevis) { 487 SHA1TransformVIS(X0, | 222#endif /* _KERNEL */ 223 224 /* 225 * general optimization: 226 * 227 * only do initial bcopy() and SHA1Transform() if 228 * buf_index != 0. if buf_index == 0, we're just 229 * wasting our time doing the bcopy() since there 230 * wasn't any data left over from a previous call to 231 * SHA1Update(). 232 */ 233 234 if (buf_index) { 235 bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len); 236 if (usevis) { 237 SHA1TransformVIS(X0, |
488 (uint64_t *)ctx->buf_un.buf8, | 238 ctx->buf_un.buf32, |
489 &ctx->state[0], VIS); 490 } else { | 239 &ctx->state[0], VIS); 240 } else { |
491 SHA1Transform(ctx->state[0], ctx->state[1], 492 ctx->state[2], ctx->state[3], 493 ctx->state[4], ctx, ctx->buf_un.buf8); | 241 SHA1_TRANSFORM(ctx, ctx->buf_un.buf8); |
494 } 495 i = buf_len; 496 } 497 498 /* 499 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate 500 * SHA-1 processing. This is achieved by "offloading" the 501 * computation of the message schedule (MS) to the VIS units. 502 * This allows the VIS computation of the message schedule 503 * to be performed in parallel with the standard integer 504 * processing of the remainder of the SHA-1 computation. 505 * performance by up to around 1.37X, compared to an optimized 506 * integer-only implementation. 507 * 508 * The VIS implementation of SHA1Transform has a different API 509 * to the standard integer version: 510 * 511 * void SHA1TransformVIS( 512 * uint64_t *, // Pointer to MS for ith block | 242 } 243 i = buf_len; 244 } 245 246 /* 247 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate 248 * SHA-1 processing. This is achieved by "offloading" the 249 * computation of the message schedule (MS) to the VIS units. 250 * This allows the VIS computation of the message schedule 251 * to be performed in parallel with the standard integer 252 * processing of the remainder of the SHA-1 computation. 253 * performance by up to around 1.37X, compared to an optimized 254 * integer-only implementation. 255 * 256 * The VIS implementation of SHA1Transform has a different API 257 * to the standard integer version: 258 * 259 * void SHA1TransformVIS( 260 * uint64_t *, // Pointer to MS for ith block |
513 * uint64_t *, // Pointer to ith block of message data | 261 * uint32_t *, // Pointer to ith block of message data |
514 * uint32_t *, // Pointer to SHA state i.e ctx->state 515 * uint64_t *, // Pointer to various VIS constants 516 * ) 517 * 518 * Note: the message data must by 4-byte aligned. 519 * 520 * Function requires VIS 1.0 support. 521 * 522 * Handling is provided to deal with arbitrary byte alingment 523 * of the input data but the performance gains are reduced 524 * for alignments other than 4-bytes. 525 */ 526 if (usevis) { | 262 * uint32_t *, // Pointer to SHA state i.e ctx->state 263 * uint64_t *, // Pointer to various VIS constants 264 * ) 265 * 266 * Note: the message data must by 4-byte aligned. 267 * 268 * Function requires VIS 1.0 support. 269 * 270 * Handling is provided to deal with arbitrary byte alingment 271 * of the input data but the performance gains are reduced 272 * for alignments other than 4-bytes. 273 */ 274 if (usevis) { |
527 if (((uint64_t)(uintptr_t)(&input[i]) & 0x3)) { | 275 if (!IS_P2ALIGNED(&input[i], sizeof (uint32_t))) { |
528 /* 529 * Main processing loop - input misaligned 530 */ 531 for (; i + 63 < input_len; i += 64) { 532 bcopy(&input[i], input64, 64); | 276 /* 277 * Main processing loop - input misaligned 278 */ 279 for (; i + 63 < input_len; i += 64) { 280 bcopy(&input[i], input64, 64); |
533 SHA1TransformVIS(X0, input64, | 281 SHA1TransformVIS(X0, (uint32_t *)input64, |
534 &ctx->state[0], VIS); 535 } 536 } else { 537 /* 538 * Main processing loop - input 8-byte aligned 539 */ 540 for (; i + 63 < input_len; i += 64) { 541 SHA1TransformVIS(X0, | 282 &ctx->state[0], VIS); 283 } 284 } else { 285 /* 286 * Main processing loop - input 8-byte aligned 287 */ 288 for (; i + 63 < input_len; i += 64) { 289 SHA1TransformVIS(X0, |
542 (uint64_t *)&input[i], | 290 /* LINTED E_BAD_PTR_CAST_ALIGN */ 291 (uint32_t *)&input[i], |
543 &ctx->state[0], VIS); 544 } 545 546 } 547#ifdef _KERNEL 548 sha1_restorefp(fpu); 549#endif /* _KERNEL */ 550 } else { 551 for (; i + 63 < input_len; i += 64) { | 292 &ctx->state[0], VIS); 293 } 294 295 } 296#ifdef _KERNEL 297 sha1_restorefp(fpu); 298#endif /* _KERNEL */ 299 } else { 300 for (; i + 63 < input_len; i += 64) { |
552 SHA1Transform(ctx->state[0], ctx->state[1], 553 ctx->state[2], ctx->state[3], ctx->state[4], 554 ctx, &input[i]); | 301 SHA1_TRANSFORM(ctx, &input[i]); |
555 } 556 } 557 558 /* 559 * general optimization: 560 * 561 * if i and input_len are the same, return now instead 562 * of calling bcopy(), since the bcopy() in this case --- 8 unchanged lines hidden (view full) --- 571 572 /* buffer remaining input */ 573 bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i); 574} 575 576#else /* VIS_SHA1 */ 577 578void | 302 } 303 } 304 305 /* 306 * general optimization: 307 * 308 * if i and input_len are the same, return now instead 309 * of calling bcopy(), since the bcopy() in this case --- 8 unchanged lines hidden (view full) --- 318 319 /* buffer remaining input */ 320 bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i); 321} 322 323#else /* VIS_SHA1 */ 324 325void |
579SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len) | 326SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len) |
580{ 581 uint32_t i, buf_index, buf_len; | 327{ 328 uint32_t i, buf_index, buf_len; |
329 const uint8_t *input = inptr; |
|
582 583 /* check for noop */ 584 if (input_len == 0) 585 return; 586 587 /* compute number of bytes mod 64 */ 588 buf_index = (ctx->count[1] >> 3) & 0x3F; 589 --- 16 unchanged lines hidden (view full) --- 606 * buf_index != 0. if buf_index == 0, we're just 607 * wasting our time doing the bcopy() since there 608 * wasn't any data left over from a previous call to 609 * SHA1Update(). 610 */ 611 612 if (buf_index) { 613 bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len); | 330 331 /* check for noop */ 332 if (input_len == 0) 333 return; 334 335 /* compute number of bytes mod 64 */ 336 buf_index = (ctx->count[1] >> 3) & 0x3F; 337 --- 16 unchanged lines hidden (view full) --- 354 * buf_index != 0. if buf_index == 0, we're just 355 * wasting our time doing the bcopy() since there 356 * wasn't any data left over from a previous call to 357 * SHA1Update(). 358 */ 359 360 if (buf_index) { 361 bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len); |
614 615 616 SHA1Transform(ctx->state[0], ctx->state[1], 617 ctx->state[2], ctx->state[3], ctx->state[4], ctx, 618 ctx->buf_un.buf8); 619 | 362 SHA1_TRANSFORM(ctx, ctx->buf_un.buf8); |
620 i = buf_len; 621 } 622 623 for (; i + 63 < input_len; i += 64) | 363 i = buf_len; 364 } 365 366 for (; i + 63 < input_len; i += 64) |
624 SHA1Transform(ctx->state[0], ctx->state[1], 625 ctx->state[2], ctx->state[3], ctx->state[4], 626 ctx, &input[i]); | 367 SHA1_TRANSFORM(ctx, &input[i]); |
627 628 /* 629 * general optimization: 630 * 631 * if i and input_len are the same, return now instead 632 * of calling bcopy(), since the bcopy() in this case 633 * will be an expensive nop. 634 */ --- 16 unchanged lines hidden (view full) --- 651 * purpose: ends an sha1 digest operation, finalizing the message digest and 652 * zeroing the context. 653 * input: uint8_t * : a buffer to store the digest in 654 * SHA1_CTX * : the context to finalize, save, and zero 655 * output: void 656 */ 657 658void | 368 369 /* 370 * general optimization: 371 * 372 * if i and input_len are the same, return now instead 373 * of calling bcopy(), since the bcopy() in this case 374 * will be an expensive nop. 375 */ --- 16 unchanged lines hidden (view full) --- 392 * purpose: ends an sha1 digest operation, finalizing the message digest and 393 * zeroing the context. 394 * input: uint8_t * : a buffer to store the digest in 395 * SHA1_CTX * : the context to finalize, save, and zero 396 * output: void 397 */ 398 399void |
659SHA1Final(uint8_t *digest, SHA1_CTX *ctx) | 400SHA1Final(void *digest, SHA1_CTX *ctx) |
660{ 661 uint8_t bitcount_be[sizeof (ctx->count)]; 662 uint32_t index = (ctx->count[1] >> 3) & 0x3f; 663 664 /* store bit count, big endian */ 665 Encode(bitcount_be, ctx->count, sizeof (bitcount_be)); 666 667 /* pad out to 56 mod 64 */ --- 4 unchanged lines hidden (view full) --- 672 673 /* store state in digest */ 674 Encode(digest, ctx->state, sizeof (ctx->state)); 675 676 /* zeroize sensitive information */ 677 bzero(ctx, sizeof (*ctx)); 678} 679 | 401{ 402 uint8_t bitcount_be[sizeof (ctx->count)]; 403 uint32_t index = (ctx->count[1] >> 3) & 0x3f; 404 405 /* store bit count, big endian */ 406 Encode(bitcount_be, ctx->count, sizeof (bitcount_be)); 407 408 /* pad out to 56 mod 64 */ --- 4 unchanged lines hidden (view full) --- 413 414 /* store state in digest */ 415 Encode(digest, ctx->state, sizeof (ctx->state)); 416 417 /* zeroize sensitive information */ 418 bzero(ctx, sizeof (*ctx)); 419} 420 |
421#if defined(__amd64) 422typedef uint64_t sha1word; 423#else 424typedef uint32_t sha1word; 425#endif 426 |
|
680/* 681 * sparc optimization: 682 * 683 * on the sparc, we can load big endian 32-bit data easily. note that 684 * special care must be taken to ensure the address is 32-bit aligned. 685 * in the interest of speed, we don't check to make sure, since 686 * careful programming can guarantee this for us. 687 */ 688 689#if defined(_BIG_ENDIAN) 690 691#define LOAD_BIG_32(addr) (*(uint32_t *)(addr)) 692 | 427/* 428 * sparc optimization: 429 * 430 * on the sparc, we can load big endian 32-bit data easily. note that 431 * special care must be taken to ensure the address is 32-bit aligned. 432 * in the interest of speed, we don't check to make sure, since 433 * careful programming can guarantee this for us. 434 */ 435 436#if defined(_BIG_ENDIAN) 437 438#define LOAD_BIG_32(addr) (*(uint32_t *)(addr)) 439 |
693#else /* little endian -- will work on big endian, but slowly */ | 440#else /* !defined(_BIG_ENDIAN) */ |
694 | 441 |
442#if defined(HAVE_BSWAP) 443 444#define LOAD_BIG_32(addr) bswap(*((uint32_t *)(addr))) 445 446#else /* !defined(HAVE_BSWAP) */ 447 448/* little endian -- will work on big endian, but slowly */ |
|
695#define LOAD_BIG_32(addr) \ 696 (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3]) | 449#define LOAD_BIG_32(addr) \ 450 (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3]) |
697#endif | |
698 | 451 |
452#endif /* !defined(HAVE_BSWAP) */ 453 454#endif /* !defined(_BIG_ENDIAN) */ 455 |
|
699/* | 456/* |
457 * SHA1Transform() 458 */ 459#if defined(W_ARRAY) 460#define W(n) w[n] 461#else /* !defined(W_ARRAY) */ 462#define W(n) w_ ## n 463#endif /* !defined(W_ARRAY) */ 464 465 466#if defined(__sparc) 467 468/* |
|
700 * sparc register window optimization: 701 * 702 * `a', `b', `c', `d', and `e' are passed into SHA1Transform 703 * explicitly since it increases the number of registers available to 704 * the compiler. under this scheme, these variables can be held in 705 * %i0 - %i4, which leaves more local and out registers available. | 469 * sparc register window optimization: 470 * 471 * `a', `b', `c', `d', and `e' are passed into SHA1Transform 472 * explicitly since it increases the number of registers available to 473 * the compiler. under this scheme, these variables can be held in 474 * %i0 - %i4, which leaves more local and out registers available. |
706 */ 707 708/* 709 * SHA1Transform() | |
710 * 711 * purpose: sha1 transformation -- updates the digest based on `block' 712 * input: uint32_t : bytes 1 - 4 of the digest 713 * uint32_t : bytes 5 - 8 of the digest 714 * uint32_t : bytes 9 - 12 of the digest 715 * uint32_t : bytes 12 - 16 of the digest 716 * uint32_t : bytes 16 - 20 of the digest 717 * SHA1_CTX * : the context to update --- 34 unchanged lines hidden (view full) --- 752 * 753 * unfortunately, loading from an array in this manner hurts 754 * performance under intel. so, there is a macro, 755 * SHA1_CONST(), used in SHA1Transform(), that either expands to 756 * a reference to this array, or to the actual constant, 757 * depending on what platform this code is compiled for. 758 */ 759 | 475 * 476 * purpose: sha1 transformation -- updates the digest based on `block' 477 * input: uint32_t : bytes 1 - 4 of the digest 478 * uint32_t : bytes 5 - 8 of the digest 479 * uint32_t : bytes 9 - 12 of the digest 480 * uint32_t : bytes 12 - 16 of the digest 481 * uint32_t : bytes 16 - 20 of the digest 482 * SHA1_CTX * : the context to update --- 34 unchanged lines hidden (view full) --- 517 * 518 * unfortunately, loading from an array in this manner hurts 519 * performance under intel. so, there is a macro, 520 * SHA1_CONST(), used in SHA1Transform(), that either expands to 521 * a reference to this array, or to the actual constant, 522 * depending on what platform this code is compiled for. 523 */ 524 |
760#if defined(__sparc) | |
761 static const uint32_t sha1_consts[] = { 762 SHA1_CONST_0, SHA1_CONST_1, SHA1_CONST_2, SHA1_CONST_3, 763 }; | 525 static const uint32_t sha1_consts[] = { 526 SHA1_CONST_0, SHA1_CONST_1, SHA1_CONST_2, SHA1_CONST_3, 527 }; |
764#endif | |
765 766 /* 767 * general optimization: 768 * 769 * use individual integers instead of using an array. this is a 770 * win, although the amount it wins by seems to vary quite a bit. 771 */ 772 --- 13 unchanged lines hidden (view full) --- 786 * even though it's quite tempting to assign to do: 787 * 788 * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32)); 789 * 790 * and only have one set of LOAD_BIG_32()'s, the compiler 791 * *does not* like that, so please resist the urge. 792 */ 793 | 528 529 /* 530 * general optimization: 531 * 532 * use individual integers instead of using an array. this is a 533 * win, although the amount it wins by seems to vary quite a bit. 534 */ 535 --- 13 unchanged lines hidden (view full) --- 549 * even though it's quite tempting to assign to do: 550 * 551 * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32)); 552 * 553 * and only have one set of LOAD_BIG_32()'s, the compiler 554 * *does not* like that, so please resist the urge. 555 */ 556 |
794#if defined(__sparc) | |
795 if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */ 796 bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32)); 797 w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15); 798 w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14); 799 w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13); 800 w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12); 801 w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11); 802 w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10); --- 36 unchanged lines hidden (view full) --- 839 w_3 = LOAD_BIG_32(blk + 12); 840 /*LINTED*/ 841 w_2 = LOAD_BIG_32(blk + 8); 842 /*LINTED*/ 843 w_1 = LOAD_BIG_32(blk + 4); 844 /*LINTED*/ 845 w_0 = LOAD_BIG_32(blk + 0); 846 } | 557 if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */ 558 bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32)); 559 w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15); 560 w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14); 561 w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13); 562 w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12); 563 w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11); 564 w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10); --- 36 unchanged lines hidden (view full) --- 601 w_3 = LOAD_BIG_32(blk + 12); 602 /*LINTED*/ 603 w_2 = LOAD_BIG_32(blk + 8); 604 /*LINTED*/ 605 w_1 = LOAD_BIG_32(blk + 4); 606 /*LINTED*/ 607 w_0 = LOAD_BIG_32(blk + 0); 608 } |
847#else 848 w_15 = LOAD_BIG_32(blk + 60); 849 w_14 = LOAD_BIG_32(blk + 56); 850 w_13 = LOAD_BIG_32(blk + 52); 851 w_12 = LOAD_BIG_32(blk + 48); 852 w_11 = LOAD_BIG_32(blk + 44); 853 w_10 = LOAD_BIG_32(blk + 40); 854 w_9 = LOAD_BIG_32(blk + 36); 855 w_8 = LOAD_BIG_32(blk + 32); 856 w_7 = LOAD_BIG_32(blk + 28); 857 w_6 = LOAD_BIG_32(blk + 24); 858 w_5 = LOAD_BIG_32(blk + 20); 859 w_4 = LOAD_BIG_32(blk + 16); 860 w_3 = LOAD_BIG_32(blk + 12); 861 w_2 = LOAD_BIG_32(blk + 8); 862 w_1 = LOAD_BIG_32(blk + 4); 863 w_0 = LOAD_BIG_32(blk + 0); 864#endif | 609#else /* !defined(__sparc) */ 610 611void 612SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64]) 613{ 614 sha1word a = ctx->state[0]; 615 sha1word b = ctx->state[1]; 616 sha1word c = ctx->state[2]; 617 sha1word d = ctx->state[3]; 618 sha1word e = ctx->state[4]; 619 620#if defined(W_ARRAY) 621 sha1word w[16]; 622#else /* !defined(W_ARRAY) */ 623 sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7; 624 sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15; 625#endif /* !defined(W_ARRAY) */ 626 627 W(0) = LOAD_BIG_32(blk + 0); 628 W(1) = LOAD_BIG_32(blk + 4); 629 W(2) = LOAD_BIG_32(blk + 8); 630 W(3) = LOAD_BIG_32(blk + 12); 631 W(4) = LOAD_BIG_32(blk + 16); 632 W(5) = LOAD_BIG_32(blk + 20); 633 W(6) = LOAD_BIG_32(blk + 24); 634 W(7) = LOAD_BIG_32(blk + 28); 635 W(8) = LOAD_BIG_32(blk + 32); 636 W(9) = LOAD_BIG_32(blk + 36); 637 W(10) = LOAD_BIG_32(blk + 40); 638 W(11) = LOAD_BIG_32(blk + 44); 639 W(12) = LOAD_BIG_32(blk + 48); 640 W(13) = LOAD_BIG_32(blk + 52); 641 W(14) = LOAD_BIG_32(blk + 56); 642 W(15) = LOAD_BIG_32(blk + 60); 643 644#endif /* !defined(__sparc) */ 645 |
865 /* 866 * general optimization: 867 * 868 * even though this approach is described in the standard as 869 * being slower algorithmically, it is 30-40% faster than the 870 * "faster" version under SPARC, because this version has more 871 * of the constraints specified at compile-time and uses fewer 872 * variables (and therefore has better register utilization) --- 9 unchanged lines hidden (view full) --- 882 * but just pretending that `d' is now `e', etc. this works 883 * really well and obviates the need for a temporary variable. 884 * however, we still explictly perform the rotate action, 885 * since it is cheaper on SPARC to do it once than to have to 886 * do it over and over again. 887 */ 888 889 /* round 1 */ | 646 /* 647 * general optimization: 648 * 649 * even though this approach is described in the standard as 650 * being slower algorithmically, it is 30-40% faster than the 651 * "faster" version under SPARC, because this version has more 652 * of the constraints specified at compile-time and uses fewer 653 * variables (and therefore has better register utilization) --- 9 unchanged lines hidden (view full) --- 663 * but just pretending that `d' is now `e', etc. this works 664 * really well and obviates the need for a temporary variable. 665 * however, we still explictly perform the rotate action, 666 * since it is cheaper on SPARC to do it once than to have to 667 * do it over and over again. 668 */ 669 670 /* round 1 */ |
890 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_0 + SHA1_CONST(0); /* 0 */ | 671 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0); /* 0 */ |
891 b = ROTATE_LEFT(b, 30); 892 | 672 b = ROTATE_LEFT(b, 30); 673 |
893 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_1 + SHA1_CONST(0); /* 1 */ | 674 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0); /* 1 */ |
894 a = ROTATE_LEFT(a, 30); 895 | 675 a = ROTATE_LEFT(a, 30); 676 |
896 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_2 + SHA1_CONST(0); /* 2 */ | 677 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0); /* 2 */ |
897 e = ROTATE_LEFT(e, 30); 898 | 678 e = ROTATE_LEFT(e, 30); 679 |
899 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_3 + SHA1_CONST(0); /* 3 */ | 680 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0); /* 3 */ |
900 d = ROTATE_LEFT(d, 30); 901 | 681 d = ROTATE_LEFT(d, 30); 682 |
902 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_4 + SHA1_CONST(0); /* 4 */ | 683 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0); /* 4 */ |
903 c = ROTATE_LEFT(c, 30); 904 | 684 c = ROTATE_LEFT(c, 30); 685 |
905 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_5 + SHA1_CONST(0); /* 5 */ | 686 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0); /* 5 */ |
906 b = ROTATE_LEFT(b, 30); 907 | 687 b = ROTATE_LEFT(b, 30); 688 |
908 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_6 + SHA1_CONST(0); /* 6 */ | 689 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0); /* 6 */ |
909 a = ROTATE_LEFT(a, 30); 910 | 690 a = ROTATE_LEFT(a, 30); 691 |
911 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_7 + SHA1_CONST(0); /* 7 */ | 692 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0); /* 7 */ |
912 e = ROTATE_LEFT(e, 30); 913 | 693 e = ROTATE_LEFT(e, 30); 694 |
914 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_8 + SHA1_CONST(0); /* 8 */ | 695 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0); /* 8 */ |
915 d = ROTATE_LEFT(d, 30); 916 | 696 d = ROTATE_LEFT(d, 30); 697 |
917 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_9 + SHA1_CONST(0); /* 9 */ | 698 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0); /* 9 */ |
918 c = ROTATE_LEFT(c, 30); 919 | 699 c = ROTATE_LEFT(c, 30); 700 |
920 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_10 + SHA1_CONST(0); /* 10 */ | 701 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */ |
921 b = ROTATE_LEFT(b, 30); 922 | 702 b = ROTATE_LEFT(b, 30); 703 |
923 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_11 + SHA1_CONST(0); /* 11 */ | 704 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */ |
924 a = ROTATE_LEFT(a, 30); 925 | 705 a = ROTATE_LEFT(a, 30); 706 |
926 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_12 + SHA1_CONST(0); /* 12 */ | 707 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */ |
927 e = ROTATE_LEFT(e, 30); 928 | 708 e = ROTATE_LEFT(e, 30); 709 |
929 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_13 + SHA1_CONST(0); /* 13 */ | 710 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */ |
930 d = ROTATE_LEFT(d, 30); 931 | 711 d = ROTATE_LEFT(d, 30); 712 |
932 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_14 + SHA1_CONST(0); /* 14 */ | 713 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */ |
933 c = ROTATE_LEFT(c, 30); 934 | 714 c = ROTATE_LEFT(c, 30); 715 |
935 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_15 + SHA1_CONST(0); /* 15 */ | 716 e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */ |
936 b = ROTATE_LEFT(b, 30); 937 | 717 b = ROTATE_LEFT(b, 30); 718 |
938 w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1); /* 16 */ 939 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_0 + SHA1_CONST(0); | 719 W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 16 */ 720 d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0); |
940 a = ROTATE_LEFT(a, 30); 941 | 721 a = ROTATE_LEFT(a, 30); 722 |
942 w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1); /* 17 */ 943 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_1 + SHA1_CONST(0); | 723 W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 17 */ 724 c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0); |
944 e = ROTATE_LEFT(e, 30); 945 | 725 e = ROTATE_LEFT(e, 30); 726 |
946 w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1); /* 18 */ 947 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_2 + SHA1_CONST(0); | 727 W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 18 */ 728 b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0); |
948 d = ROTATE_LEFT(d, 30); 949 | 729 d = ROTATE_LEFT(d, 30); 730 |
950 w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1); /* 19 */ 951 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_3 + SHA1_CONST(0); | 731 W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 19 */ 732 a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0); |
952 c = ROTATE_LEFT(c, 30); 953 954 /* round 2 */ | 733 c = ROTATE_LEFT(c, 30); 734 735 /* round 2 */ |
955 w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1); /* 20 */ 956 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_4 + SHA1_CONST(1); | 736 W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 20 */ 737 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1); |
957 b = ROTATE_LEFT(b, 30); 958 | 738 b = ROTATE_LEFT(b, 30); 739 |
959 w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1); /* 21 */ 960 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_5 + SHA1_CONST(1); | 740 W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 21 */ 741 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1); |
961 a = ROTATE_LEFT(a, 30); 962 | 742 a = ROTATE_LEFT(a, 30); 743 |
963 w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1); /* 22 */ 964 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_6 + SHA1_CONST(1); | 744 W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 22 */ 745 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1); |
965 e = ROTATE_LEFT(e, 30); 966 | 746 e = ROTATE_LEFT(e, 30); 747 |
967 w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1); /* 23 */ 968 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_7 + SHA1_CONST(1); | 748 W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 23 */ 749 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1); |
969 d = ROTATE_LEFT(d, 30); 970 | 750 d = ROTATE_LEFT(d, 30); 751 |
971 w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1); /* 24 */ 972 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_8 + SHA1_CONST(1); | 752 W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 24 */ 753 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1); |
973 c = ROTATE_LEFT(c, 30); 974 | 754 c = ROTATE_LEFT(c, 30); 755 |
975 w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1); /* 25 */ 976 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_9 + SHA1_CONST(1); | 756 W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 25 */ 757 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1); |
977 b = ROTATE_LEFT(b, 30); 978 | 758 b = ROTATE_LEFT(b, 30); 759 |
979 w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1); /* 26 */ 980 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_10 + SHA1_CONST(1); | 760 W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 26 */ 761 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1); |
981 a = ROTATE_LEFT(a, 30); 982 | 762 a = ROTATE_LEFT(a, 30); 763 |
983 w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1); /* 27 */ 984 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_11 + SHA1_CONST(1); | 764 W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 27 */ 765 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1); |
985 e = ROTATE_LEFT(e, 30); 986 | 766 e = ROTATE_LEFT(e, 30); 767 |
987 w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1); /* 28 */ 988 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_12 + SHA1_CONST(1); | 768 W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 28 */ 769 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1); |
989 d = ROTATE_LEFT(d, 30); 990 | 770 d = ROTATE_LEFT(d, 30); 771 |
991 w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1); /* 29 */ 992 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_13 + SHA1_CONST(1); | 772 W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 29 */ 773 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1); |
993 c = ROTATE_LEFT(c, 30); 994 | 774 c = ROTATE_LEFT(c, 30); 775 |
995 w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1); /* 30 */ 996 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_14 + SHA1_CONST(1); | 776 W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 30 */ 777 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1); |
997 b = ROTATE_LEFT(b, 30); 998 | 778 b = ROTATE_LEFT(b, 30); 779 |
999 w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1); /* 31 */ 1000 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_15 + SHA1_CONST(1); | 780 W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 31 */ 781 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1); |
1001 a = ROTATE_LEFT(a, 30); 1002 | 782 a = ROTATE_LEFT(a, 30); 783 |
1003 w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1); /* 32 */ 1004 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_0 + SHA1_CONST(1); | 784 W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 32 */ 785 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1); |
1005 e = ROTATE_LEFT(e, 30); 1006 | 786 e = ROTATE_LEFT(e, 30); 787 |
1007 w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1); /* 33 */ 1008 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_1 + SHA1_CONST(1); | 788 W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 33 */ 789 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1); |
1009 d = ROTATE_LEFT(d, 30); 1010 | 790 d = ROTATE_LEFT(d, 30); 791 |
1011 w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1); /* 34 */ 1012 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_2 + SHA1_CONST(1); | 792 W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 34 */ 793 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1); |
1013 c = ROTATE_LEFT(c, 30); 1014 | 794 c = ROTATE_LEFT(c, 30); 795 |
1015 w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1); /* 35 */ 1016 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_3 + SHA1_CONST(1); | 796 W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 35 */ 797 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1); |
1017 b = ROTATE_LEFT(b, 30); 1018 | 798 b = ROTATE_LEFT(b, 30); 799 |
1019 w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1); /* 36 */ 1020 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_4 + SHA1_CONST(1); | 800 W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 36 */ 801 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1); |
1021 a = ROTATE_LEFT(a, 30); 1022 | 802 a = ROTATE_LEFT(a, 30); 803 |
1023 w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1); /* 37 */ 1024 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_5 + SHA1_CONST(1); | 804 W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 37 */ 805 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1); |
1025 e = ROTATE_LEFT(e, 30); 1026 | 806 e = ROTATE_LEFT(e, 30); 807 |
1027 w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1); /* 38 */ 1028 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_6 + SHA1_CONST(1); | 808 W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 38 */ 809 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1); |
1029 d = ROTATE_LEFT(d, 30); 1030 | 810 d = ROTATE_LEFT(d, 30); 811 |
1031 w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1); /* 39 */ 1032 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_7 + SHA1_CONST(1); | 812 W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 39 */ 813 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1); |
1033 c = ROTATE_LEFT(c, 30); 1034 1035 /* round 3 */ | 814 c = ROTATE_LEFT(c, 30); 815 816 /* round 3 */ |
1036 w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1); /* 40 */ 1037 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_8 + SHA1_CONST(2); | 817 W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 40 */ 818 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2); |
1038 b = ROTATE_LEFT(b, 30); 1039 | 819 b = ROTATE_LEFT(b, 30); 820 |
1040 w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1); /* 41 */ 1041 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_9 + SHA1_CONST(2); | 821 W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 41 */ 822 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2); |
1042 a = ROTATE_LEFT(a, 30); 1043 | 823 a = ROTATE_LEFT(a, 30); 824 |
1044 w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1); /* 42 */ 1045 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_10 + SHA1_CONST(2); | 825 W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 42 */ 826 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2); |
1046 e = ROTATE_LEFT(e, 30); 1047 | 827 e = ROTATE_LEFT(e, 30); 828 |
1048 w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1); /* 43 */ 1049 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_11 + SHA1_CONST(2); | 829 W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 43 */ 830 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2); |
1050 d = ROTATE_LEFT(d, 30); 1051 | 831 d = ROTATE_LEFT(d, 30); 832 |
1052 w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1); /* 44 */ 1053 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_12 + SHA1_CONST(2); | 833 W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 44 */ 834 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2); |
1054 c = ROTATE_LEFT(c, 30); 1055 | 835 c = ROTATE_LEFT(c, 30); 836 |
1056 w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1); /* 45 */ 1057 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_13 + SHA1_CONST(2); | 837 W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 45 */ 838 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2); |
1058 b = ROTATE_LEFT(b, 30); 1059 | 839 b = ROTATE_LEFT(b, 30); 840 |
1060 w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1); /* 46 */ 1061 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_14 + SHA1_CONST(2); | 841 W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 46 */ 842 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2); |
1062 a = ROTATE_LEFT(a, 30); 1063 | 843 a = ROTATE_LEFT(a, 30); 844 |
1064 w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1); /* 47 */ 1065 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_15 + SHA1_CONST(2); | 845 W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 47 */ 846 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2); |
1066 e = ROTATE_LEFT(e, 30); 1067 | 847 e = ROTATE_LEFT(e, 30); 848 |
1068 w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1); /* 48 */ 1069 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_0 + SHA1_CONST(2); | 849 W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 48 */ 850 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2); |
1070 d = ROTATE_LEFT(d, 30); 1071 | 851 d = ROTATE_LEFT(d, 30); 852 |
1072 w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1); /* 49 */ 1073 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_1 + SHA1_CONST(2); | 853 W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 49 */ 854 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2); |
1074 c = ROTATE_LEFT(c, 30); 1075 | 855 c = ROTATE_LEFT(c, 30); 856 |
1076 w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1); /* 50 */ 1077 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_2 + SHA1_CONST(2); | 857 W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 50 */ 858 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2); |
1078 b = ROTATE_LEFT(b, 30); 1079 | 859 b = ROTATE_LEFT(b, 30); 860 |
1080 w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1); /* 51 */ 1081 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_3 + SHA1_CONST(2); | 861 W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 51 */ 862 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2); |
1082 a = ROTATE_LEFT(a, 30); 1083 | 863 a = ROTATE_LEFT(a, 30); 864 |
1084 w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1); /* 52 */ 1085 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_4 + SHA1_CONST(2); | 865 W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 52 */ 866 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2); |
1086 e = ROTATE_LEFT(e, 30); 1087 | 867 e = ROTATE_LEFT(e, 30); 868 |
1088 w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1); /* 53 */ 1089 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_5 + SHA1_CONST(2); | 869 W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 53 */ 870 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2); |
1090 d = ROTATE_LEFT(d, 30); 1091 | 871 d = ROTATE_LEFT(d, 30); 872 |
1092 w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1); /* 54 */ 1093 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_6 + SHA1_CONST(2); | 873 W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 54 */ 874 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2); |
1094 c = ROTATE_LEFT(c, 30); 1095 | 875 c = ROTATE_LEFT(c, 30); 876 |
1096 w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1); /* 55 */ 1097 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_7 + SHA1_CONST(2); | 877 W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 55 */ 878 e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2); |
1098 b = ROTATE_LEFT(b, 30); 1099 | 879 b = ROTATE_LEFT(b, 30); 880 |
1100 w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1); /* 56 */ 1101 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_8 + SHA1_CONST(2); | 881 W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 56 */ 882 d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2); |
1102 a = ROTATE_LEFT(a, 30); 1103 | 883 a = ROTATE_LEFT(a, 30); 884 |
1104 w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1); /* 57 */ 1105 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_9 + SHA1_CONST(2); | 885 W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 57 */ 886 c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2); |
1106 e = ROTATE_LEFT(e, 30); 1107 | 887 e = ROTATE_LEFT(e, 30); 888 |
1108 w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1); /* 58 */ 1109 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_10 + SHA1_CONST(2); | 889 W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 58 */ 890 b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2); |
1110 d = ROTATE_LEFT(d, 30); 1111 | 891 d = ROTATE_LEFT(d, 30); 892 |
1112 w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1); /* 59 */ 1113 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_11 + SHA1_CONST(2); | 893 W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 59 */ 894 a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2); |
1114 c = ROTATE_LEFT(c, 30); 1115 1116 /* round 4 */ | 895 c = ROTATE_LEFT(c, 30); 896 897 /* round 4 */ |
1117 w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1); /* 60 */ 1118 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_12 + SHA1_CONST(3); | 898 W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 60 */ 899 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3); |
1119 b = ROTATE_LEFT(b, 30); 1120 | 900 b = ROTATE_LEFT(b, 30); 901 |
1121 w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1); /* 61 */ 1122 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_13 + SHA1_CONST(3); | 902 W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 61 */ 903 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3); |
1123 a = ROTATE_LEFT(a, 30); 1124 | 904 a = ROTATE_LEFT(a, 30); 905 |
1125 w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1); /* 62 */ 1126 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_14 + SHA1_CONST(3); | 906 W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 62 */ 907 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3); |
1127 e = ROTATE_LEFT(e, 30); 1128 | 908 e = ROTATE_LEFT(e, 30); 909 |
1129 w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1); /* 63 */ 1130 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_15 + SHA1_CONST(3); | 910 W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 63 */ 911 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3); |
1131 d = ROTATE_LEFT(d, 30); 1132 | 912 d = ROTATE_LEFT(d, 30); 913 |
1133 w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1); /* 64 */ 1134 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_0 + SHA1_CONST(3); | 914 W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 64 */ 915 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3); |
1135 c = ROTATE_LEFT(c, 30); 1136 | 916 c = ROTATE_LEFT(c, 30); 917 |
1137 w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1); /* 65 */ 1138 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_1 + SHA1_CONST(3); | 918 W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 65 */ 919 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3); |
1139 b = ROTATE_LEFT(b, 30); 1140 | 920 b = ROTATE_LEFT(b, 30); 921 |
1141 w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1); /* 66 */ 1142 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_2 + SHA1_CONST(3); | 922 W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 66 */ 923 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3); |
1143 a = ROTATE_LEFT(a, 30); 1144 | 924 a = ROTATE_LEFT(a, 30); 925 |
1145 w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1); /* 67 */ 1146 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_3 + SHA1_CONST(3); | 926 W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 67 */ 927 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3); |
1147 e = ROTATE_LEFT(e, 30); 1148 | 928 e = ROTATE_LEFT(e, 30); 929 |
1149 w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1); /* 68 */ 1150 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_4 + SHA1_CONST(3); | 930 W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 68 */ 931 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3); |
1151 d = ROTATE_LEFT(d, 30); 1152 | 932 d = ROTATE_LEFT(d, 30); 933 |
1153 w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1); /* 69 */ 1154 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_5 + SHA1_CONST(3); | 934 W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 69 */ 935 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3); |
1155 c = ROTATE_LEFT(c, 30); 1156 | 936 c = ROTATE_LEFT(c, 30); 937 |
1157 w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1); /* 70 */ 1158 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_6 + SHA1_CONST(3); | 938 W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 70 */ 939 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3); |
1159 b = ROTATE_LEFT(b, 30); 1160 | 940 b = ROTATE_LEFT(b, 30); 941 |
1161 w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1); /* 71 */ 1162 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_7 + SHA1_CONST(3); | 942 W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 71 */ 943 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3); |
1163 a = ROTATE_LEFT(a, 30); 1164 | 944 a = ROTATE_LEFT(a, 30); 945 |
1165 w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1); /* 72 */ 1166 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_8 + SHA1_CONST(3); | 946 W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 72 */ 947 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3); |
1167 e = ROTATE_LEFT(e, 30); 1168 | 948 e = ROTATE_LEFT(e, 30); 949 |
1169 w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1); /* 73 */ 1170 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_9 + SHA1_CONST(3); | 950 W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 73 */ 951 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3); |
1171 d = ROTATE_LEFT(d, 30); 1172 | 952 d = ROTATE_LEFT(d, 30); 953 |
1173 w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1); /* 74 */ 1174 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_10 + SHA1_CONST(3); | 954 W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 74 */ 955 a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3); |
1175 c = ROTATE_LEFT(c, 30); 1176 | 956 c = ROTATE_LEFT(c, 30); 957 |
1177 w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1); /* 75 */ 1178 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_11 + SHA1_CONST(3); | 958 W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 75 */ 959 e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3); |
1179 b = ROTATE_LEFT(b, 30); 1180 | 960 b = ROTATE_LEFT(b, 30); 961 |
1181 w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1); /* 76 */ 1182 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_12 + SHA1_CONST(3); | 962 W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 76 */ 963 d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3); |
1183 a = ROTATE_LEFT(a, 30); 1184 | 964 a = ROTATE_LEFT(a, 30); 965 |
1185 w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1); /* 77 */ 1186 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_13 + SHA1_CONST(3); | 966 W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 77 */ 967 c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3); |
1187 e = ROTATE_LEFT(e, 30); 1188 | 968 e = ROTATE_LEFT(e, 30); 969 |
1189 w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1); /* 78 */ 1190 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_14 + SHA1_CONST(3); | 970 W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 78 */ 971 b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3); |
1191 d = ROTATE_LEFT(d, 30); 1192 | 972 d = ROTATE_LEFT(d, 30); 973 |
1193 w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1); /* 79 */ | 974 W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 79 */ |
1194 | 975 |
1195 ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_15 + | 976 ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) + |
1196 SHA1_CONST(3); 1197 ctx->state[1] += b; 1198 ctx->state[2] += ROTATE_LEFT(c, 30); 1199 ctx->state[3] += d; 1200 ctx->state[4] += e; 1201 1202 /* zeroize sensitive information */ | 977 SHA1_CONST(3); 978 ctx->state[1] += b; 979 ctx->state[2] += ROTATE_LEFT(c, 30); 980 ctx->state[3] += d; 981 ctx->state[4] += e; 982 983 /* zeroize sensitive information */ |
1203 w_0 = w_1 = w_2 = w_3 = w_4 = w_5 = w_6 = w_7 = w_8 = 0; 1204 w_9 = w_10 = w_11 = w_12 = w_13 = w_14 = w_15 = 0; | 984 W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0; 985 W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0; |
1205} 1206 1207/* | 986} 987 988/* |
1208 * devpro compiler optimization: 1209 * 1210 * the compiler can generate better code if it knows that `input' and 1211 * `output' do not point to the same source. there is no portable 1212 * way to tell the compiler this, but the sun compiler recognizes the 1213 * `_Restrict' keyword to indicate this condition. use it if possible. 1214 */ 1215 1216#ifdef __RESTRICT 1217#define restrict _Restrict 1218#else 1219#define restrict /* nothing */ 1220#endif 1221 1222/* | |
1223 * Encode() 1224 * 1225 * purpose: to convert a list of numbers from little endian to big endian 1226 * input: uint8_t * : place to store the converted big endian numbers 1227 * uint32_t * : place to get numbers to convert from 1228 * size_t : the length of the input in bytes 1229 * output: void 1230 */ 1231 1232static void | 989 * Encode() 990 * 991 * purpose: to convert a list of numbers from little endian to big endian 992 * input: uint8_t * : place to store the converted big endian numbers 993 * uint32_t * : place to get numbers to convert from 994 * size_t : the length of the input in bytes 995 * output: void 996 */ 997 998static void |
1233Encode(uint8_t *restrict output, uint32_t *restrict input, size_t len) | 999Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input, 1000 size_t len) |
1234{ 1235 size_t i, j; 1236 1237#if defined(__sparc) 1238 if (IS_P2ALIGNED(output, sizeof (uint32_t))) { 1239 for (i = 0, j = 0; j < len; i++, j += 4) { 1240 /* LINTED: pointer alignment */ 1241 *((uint32_t *)(output + j)) = input[i]; --- 5 unchanged lines hidden (view full) --- 1247 output[j + 1] = (input[i] >> 16) & 0xff; 1248 output[j + 2] = (input[i] >> 8) & 0xff; 1249 output[j + 3] = input[i] & 0xff; 1250 } 1251#if defined(__sparc) 1252 } 1253#endif 1254} | 1001{ 1002 size_t i, j; 1003 1004#if defined(__sparc) 1005 if (IS_P2ALIGNED(output, sizeof (uint32_t))) { 1006 for (i = 0, j = 0; j < len; i++, j += 4) { 1007 /* LINTED: pointer alignment */ 1008 *((uint32_t *)(output + j)) = input[i]; --- 5 unchanged lines hidden (view full) --- 1014 output[j + 1] = (input[i] >> 16) & 0xff; 1015 output[j + 2] = (input[i] >> 8) & 0xff; 1016 output[j + 3] = input[i] & 0xff; 1017 } 1018#if defined(__sparc) 1019 } 1020#endif 1021} |
1255 1256 1257#ifdef _KERNEL 1258 1259/* 1260 * KCF software provider control entry points. 1261 */ 1262/* ARGSUSED */ 1263static void 1264sha1_provider_status(crypto_provider_handle_t provider, uint_t *status) 1265{ 1266 *status = CRYPTO_PROVIDER_READY; 1267} 1268 1269/* 1270 * KCF software provider digest entry points. 1271 */ 1272 1273static int 1274sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 1275 crypto_req_handle_t req) 1276{ 1277 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE) 1278 return (CRYPTO_MECHANISM_INVALID); 1279 1280 /* 1281 * Allocate and initialize SHA1 context. 1282 */ 1283 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t), 1284 crypto_kmflag(req)); 1285 if (ctx->cc_provider_private == NULL) 1286 return (CRYPTO_HOST_MEMORY); 1287 1288 PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE; 1289 SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx); 1290 1291 return (CRYPTO_SUCCESS); 1292} 1293 1294/* 1295 * Helper SHA1 digest update function for uio data. 1296 */ 1297static int 1298sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data) 1299{ 1300 off_t offset = data->cd_offset; 1301 size_t length = data->cd_length; 1302 uint_t vec_idx; 1303 size_t cur_len; 1304 1305 /* we support only kernel buffer */ 1306 if (data->cd_uio->uio_segflg != UIO_SYSSPACE) 1307 return (CRYPTO_ARGUMENTS_BAD); 1308 1309 /* 1310 * Jump to the first iovec containing data to be 1311 * digested. 1312 */ 1313 for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt && 1314 offset >= data->cd_uio->uio_iov[vec_idx].iov_len; 1315 offset -= data->cd_uio->uio_iov[vec_idx++].iov_len); 1316 if (vec_idx == data->cd_uio->uio_iovcnt) { 1317 /* 1318 * The caller specified an offset that is larger than the 1319 * total size of the buffers it provided. 1320 */ 1321 return (CRYPTO_DATA_LEN_RANGE); 1322 } 1323 1324 /* 1325 * Now do the digesting on the iovecs. 
1326 */ 1327 while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) { 1328 cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len - 1329 offset, length); 1330 1331 SHA1Update(sha1_ctx, 1332 (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset, 1333 cur_len); 1334 1335 length -= cur_len; 1336 vec_idx++; 1337 offset = 0; 1338 } 1339 1340 if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) { 1341 /* 1342 * The end of the specified iovec's was reached but 1343 * the length requested could not be processed, i.e. 1344 * The caller requested to digest more data than it provided. 1345 */ 1346 return (CRYPTO_DATA_LEN_RANGE); 1347 } 1348 1349 return (CRYPTO_SUCCESS); 1350} 1351 1352/* 1353 * Helper SHA1 digest final function for uio data. 1354 * digest_len is the length of the desired digest. If digest_len 1355 * is smaller than the default SHA1 digest length, the caller 1356 * must pass a scratch buffer, digest_scratch, which must 1357 * be at least SHA1_DIGEST_LENGTH bytes. 1358 */ 1359static int 1360sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest, 1361 ulong_t digest_len, uchar_t *digest_scratch) 1362{ 1363 off_t offset = digest->cd_offset; 1364 uint_t vec_idx; 1365 1366 /* we support only kernel buffer */ 1367 if (digest->cd_uio->uio_segflg != UIO_SYSSPACE) 1368 return (CRYPTO_ARGUMENTS_BAD); 1369 1370 /* 1371 * Jump to the first iovec containing ptr to the digest to 1372 * be returned. 1373 */ 1374 for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len && 1375 vec_idx < digest->cd_uio->uio_iovcnt; 1376 offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len); 1377 if (vec_idx == digest->cd_uio->uio_iovcnt) { 1378 /* 1379 * The caller specified an offset that is 1380 * larger than the total size of the buffers 1381 * it provided. 
1382 */ 1383 return (CRYPTO_DATA_LEN_RANGE); 1384 } 1385 1386 if (offset + digest_len <= 1387 digest->cd_uio->uio_iov[vec_idx].iov_len) { 1388 /* 1389 * The computed SHA1 digest will fit in the current 1390 * iovec. 1391 */ 1392 if (digest_len != SHA1_DIGEST_LENGTH) { 1393 /* 1394 * The caller requested a short digest. Digest 1395 * into a scratch buffer and return to 1396 * the user only what was requested. 1397 */ 1398 SHA1Final(digest_scratch, sha1_ctx); 1399 bcopy(digest_scratch, (uchar_t *)digest-> 1400 cd_uio->uio_iov[vec_idx].iov_base + offset, 1401 digest_len); 1402 } else { 1403 SHA1Final((uchar_t *)digest-> 1404 cd_uio->uio_iov[vec_idx].iov_base + offset, 1405 sha1_ctx); 1406 } 1407 } else { 1408 /* 1409 * The computed digest will be crossing one or more iovec's. 1410 * This is bad performance-wise but we need to support it. 1411 * Allocate a small scratch buffer on the stack and 1412 * copy it piece meal to the specified digest iovec's. 1413 */ 1414 uchar_t digest_tmp[SHA1_DIGEST_LENGTH]; 1415 off_t scratch_offset = 0; 1416 size_t length = digest_len; 1417 size_t cur_len; 1418 1419 SHA1Final(digest_tmp, sha1_ctx); 1420 1421 while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) { 1422 cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len - 1423 offset, length); 1424 bcopy(digest_tmp + scratch_offset, 1425 digest->cd_uio->uio_iov[vec_idx].iov_base + offset, 1426 cur_len); 1427 1428 length -= cur_len; 1429 vec_idx++; 1430 scratch_offset += cur_len; 1431 offset = 0; 1432 } 1433 1434 if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) { 1435 /* 1436 * The end of the specified iovec's was reached but 1437 * the length requested could not be processed, i.e. 1438 * The caller requested to digest more data than it 1439 * provided. 1440 */ 1441 return (CRYPTO_DATA_LEN_RANGE); 1442 } 1443 } 1444 1445 return (CRYPTO_SUCCESS); 1446} 1447 1448/* 1449 * Helper SHA1 digest update for mblk's. 
 */
/*
 * Digest the bytes [cd_offset, cd_offset + cd_length) of the mblk chain
 * in data into sha1_ctx.  Returns CRYPTO_SUCCESS, or
 * CRYPTO_DATA_LEN_RANGE if the chain does not cover the requested range.
 */
static int
sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	mblk_t *mp;
	size_t cur_len;

	/*
	 * Jump to the first mblk_t containing data to be digested.
	 */
	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the mblk chain.
	 */
	while (mp != NULL && length > 0) {
		cur_len = MIN(MBLKL(mp) - offset, length);
		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
		length -= cur_len;
		/* offset only applies within the first mblk visited */
		offset = 0;
		mp = mp->b_cont;
	}

	if (mp == NULL && length > 0) {
		/*
		 * The end of the mblk was reached but the length requested
		 * could not be processed, i.e. The caller requested
		 * to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper SHA1 digest final for mblk's.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default SHA1 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least SHA1_DIGEST_LENGTH bytes.
 */
static int
sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed SHA1 digest will fit in the current mblk.
		 * Do the SHA1Final() in-place.
		 */
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest_scratch, sha1_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			SHA1Final(mp->b_rptr + offset, sha1_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest iovec's.
		 */
		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		SHA1Final(digest_tmp, sha1_ctx);

		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Single-part digest entry point: update with data, finalize into digest.
 * Always consumes (frees) the per-request context, except on
 * CRYPTO_BUFFER_TOO_SMALL where the caller may retry with a larger buffer.
 * On success, digest->cd_length is set to SHA1_DIGEST_LENGTH.
 */
/* ARGSUSED */
static int
sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		/*
		 * digest_scratch may be NULL here because the requested
		 * length equals SHA1_DIGEST_LENGTH, so the helpers never
		 * dereference it.
		 */
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}

/*
 * Multi-part digest update entry point.  The per-request context is kept
 * alive regardless of the outcome; only digest(_final) releases it.
 */
/* ARGSUSED */
static int
sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	return (ret);
}

/*
 * Multi-part digest final entry point.  Frees the per-request context on
 * every path except CRYPTO_BUFFER_TOO_SMALL.
 */
/* ARGSUSED */
static int
sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA1 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}

/*
 * Atomic (init+update+final in one call) digest entry point.  Uses a
 * stack SHA1_CTX, so there is no provider-private state to free.
 */
/* ARGSUSED */
static int
sha1_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	SHA1_CTX sha1_ctx;

	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the SHA1 init.
	 */
	SHA1Init(&sha1_ctx);

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&sha1_ctx, data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_update_mblk(&sha1_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_ctx, digest,
		    SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
		    SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}

/*
 * KCF software provider mac entry points.
 *
 * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
 *
 * Init:
 * The initialization routine initializes what we denote
 * as the inner and outer contexts by doing
 * - for inner context: SHA1(key XOR ipad)
 * - for outer context: SHA1(key XOR opad)
 *
 * Update:
 * Each subsequent SHA1 HMAC update will result in an
 * update of the inner context with the specified data.
1832 * 1833 * Final: 1834 * The SHA1 HMAC final will do a SHA1 final operation on the 1835 * inner context, and the resulting digest will be used 1836 * as the data for an update on the outer context. Last 1837 * but not least, a SHA1 final on the outer context will 1838 * be performed to obtain the SHA1 HMAC digest to return 1839 * to the user. 1840 */ 1841 1842/* 1843 * Initialize a SHA1-HMAC context. 1844 */ 1845static void 1846sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes) 1847{ 1848 uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK]; 1849 uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK]; 1850 uint_t i; 1851 1852 bzero(ipad, SHA1_HMAC_BLOCK_SIZE); 1853 bzero(opad, SHA1_HMAC_BLOCK_SIZE); 1854 1855 bcopy(keyval, ipad, length_in_bytes); 1856 bcopy(keyval, opad, length_in_bytes); 1857 1858 /* XOR key with ipad (0x36) and opad (0x5c) */ 1859 for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) { 1860 ipad[i] ^= 0x36363636; 1861 opad[i] ^= 0x5c5c5c5c; 1862 } 1863 1864 /* perform SHA1 on ipad */ 1865 SHA1Init(&ctx->hc_icontext); 1866 SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE); 1867 1868 /* perform SHA1 on opad */ 1869 SHA1Init(&ctx->hc_ocontext); 1870 SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE); 1871} 1872 1873/* 1874 */ 1875static int 1876sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 1877 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 1878 crypto_req_handle_t req) 1879{ 1880 int ret = CRYPTO_SUCCESS; 1881 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); 1882 1883 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE && 1884 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE) 1885 return (CRYPTO_MECHANISM_INVALID); 1886 1887 /* Add support for key by attributes (RFE 4706552) */ 1888 if (key->ck_format != CRYPTO_KEY_RAW) 1889 return (CRYPTO_ARGUMENTS_BAD); 1890 1891 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t), 1892 crypto_kmflag(req)); 1893 if 
(ctx->cc_provider_private == NULL) 1894 return (CRYPTO_HOST_MEMORY); 1895 1896 if (ctx_template != NULL) { 1897 /* reuse context template */ 1898 bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx), 1899 sizeof (sha1_hmac_ctx_t)); 1900 } else { 1901 /* no context template, compute context */ 1902 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) { 1903 uchar_t digested_key[SHA1_DIGEST_LENGTH]; 1904 sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private; 1905 1906 /* 1907 * Hash the passed-in key to get a smaller key. 1908 * The inner context is used since it hasn't been 1909 * initialized yet. 1910 */ 1911 PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext, 1912 key->ck_data, keylen_in_bytes, digested_key); 1913 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx), 1914 digested_key, SHA1_DIGEST_LENGTH); 1915 } else { 1916 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx), 1917 key->ck_data, keylen_in_bytes); 1918 } 1919 } 1920 1921 /* 1922 * Get the mechanism parameters, if applicable. 1923 */ 1924 PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type; 1925 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) { 1926 if (mechanism->cm_param == NULL || 1927 mechanism->cm_param_len != sizeof (ulong_t)) 1928 ret = CRYPTO_MECHANISM_PARAM_INVALID; 1929 PROV_SHA1_GET_DIGEST_LEN(mechanism, 1930 PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len); 1931 if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len > 1932 SHA1_DIGEST_LENGTH) 1933 ret = CRYPTO_MECHANISM_PARAM_INVALID; 1934 } 1935 1936 if (ret != CRYPTO_SUCCESS) { 1937 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t)); 1938 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t)); 1939 ctx->cc_provider_private = NULL; 1940 } 1941 1942 return (ret); 1943} 1944 1945/* ARGSUSED */ 1946static int 1947sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req) 1948{ 1949 int ret = CRYPTO_SUCCESS; 1950 1951 ASSERT(ctx->cc_provider_private != NULL); 1952 1953 /* 1954 * Do a SHA1 update of the inner context using the specified 1955 * data. 
1956 */ 1957 switch (data->cd_format) { 1958 case CRYPTO_DATA_RAW: 1959 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, 1960 (uint8_t *)data->cd_raw.iov_base + data->cd_offset, 1961 data->cd_length); 1962 break; 1963 case CRYPTO_DATA_UIO: 1964 ret = sha1_digest_update_uio( 1965 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data); 1966 break; 1967 case CRYPTO_DATA_MBLK: 1968 ret = sha1_digest_update_mblk( 1969 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data); 1970 break; 1971 default: 1972 ret = CRYPTO_ARGUMENTS_BAD; 1973 } 1974 1975 return (ret); 1976} 1977 1978/* ARGSUSED */ 1979static int 1980sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req) 1981{ 1982 int ret = CRYPTO_SUCCESS; 1983 uchar_t digest[SHA1_DIGEST_LENGTH]; 1984 uint32_t digest_len = SHA1_DIGEST_LENGTH; 1985 1986 ASSERT(ctx->cc_provider_private != NULL); 1987 1988 if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type == 1989 SHA1_HMAC_GEN_MECH_INFO_TYPE) 1990 digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len; 1991 1992 /* 1993 * We need to just return the length needed to store the output. 1994 * We should not destroy the context for the following cases. 1995 */ 1996 if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) { 1997 mac->cd_length = digest_len; 1998 return (CRYPTO_BUFFER_TOO_SMALL); 1999 } 2000 2001 /* 2002 * Do a SHA1 final on the inner context. 2003 */ 2004 SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext); 2005 2006 /* 2007 * Do a SHA1 update on the outer context, feeding the inner 2008 * digest as data. 2009 */ 2010 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest, 2011 SHA1_DIGEST_LENGTH); 2012 2013 /* 2014 * Do a SHA1 final on the outer context, storing the computing 2015 * digest in the users buffer. 2016 */ 2017 switch (mac->cd_format) { 2018 case CRYPTO_DATA_RAW: 2019 if (digest_len != SHA1_DIGEST_LENGTH) { 2020 /* 2021 * The caller requested a short digest. Digest 2022 * into a scratch buffer and return to 2023 * the user only what was requested. 
2024 */ 2025 SHA1Final(digest, 2026 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext); 2027 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base + 2028 mac->cd_offset, digest_len); 2029 } else { 2030 SHA1Final((unsigned char *)mac->cd_raw.iov_base + 2031 mac->cd_offset, 2032 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext); 2033 } 2034 break; 2035 case CRYPTO_DATA_UIO: 2036 ret = sha1_digest_final_uio( 2037 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac, 2038 digest_len, digest); 2039 break; 2040 case CRYPTO_DATA_MBLK: 2041 ret = sha1_digest_final_mblk( 2042 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac, 2043 digest_len, digest); 2044 break; 2045 default: 2046 ret = CRYPTO_ARGUMENTS_BAD; 2047 } 2048 2049 if (ret == CRYPTO_SUCCESS) { 2050 mac->cd_length = digest_len; 2051 } else { 2052 mac->cd_length = 0; 2053 } 2054 2055 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t)); 2056 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t)); 2057 ctx->cc_provider_private = NULL; 2058 2059 return (ret); 2060} 2061 2062#define SHA1_MAC_UPDATE(data, ctx, ret) { \ 2063 switch (data->cd_format) { \ 2064 case CRYPTO_DATA_RAW: \ 2065 SHA1Update(&(ctx).hc_icontext, \ 2066 (uint8_t *)data->cd_raw.iov_base + \ 2067 data->cd_offset, data->cd_length); \ 2068 break; \ 2069 case CRYPTO_DATA_UIO: \ 2070 ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \ 2071 break; \ 2072 case CRYPTO_DATA_MBLK: \ 2073 ret = sha1_digest_update_mblk(&(ctx).hc_icontext, \ 2074 data); \ 2075 break; \ 2076 default: \ 2077 ret = CRYPTO_ARGUMENTS_BAD; \ 2078 } \ 2079} 2080 2081/* ARGSUSED */ 2082static int 2083sha1_mac_atomic(crypto_provider_handle_t provider, 2084 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 2085 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, 2086 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 2087{ 2088 int ret = CRYPTO_SUCCESS; 2089 uchar_t digest[SHA1_DIGEST_LENGTH]; 2090 sha1_hmac_ctx_t sha1_hmac_ctx; 2091 uint32_t digest_len = SHA1_DIGEST_LENGTH; 
2092 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); 2093 2094 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE && 2095 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE) 2096 return (CRYPTO_MECHANISM_INVALID); 2097 2098 /* Add support for key by attributes (RFE 4706552) */ 2099 if (key->ck_format != CRYPTO_KEY_RAW) 2100 return (CRYPTO_ARGUMENTS_BAD); 2101 2102 if (ctx_template != NULL) { 2103 /* reuse context template */ 2104 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t)); 2105 } else { 2106 /* no context template, initialize context */ 2107 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) { 2108 /* 2109 * Hash the passed-in key to get a smaller key. 2110 * The inner context is used since it hasn't been 2111 * initialized yet. 2112 */ 2113 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext, 2114 key->ck_data, keylen_in_bytes, digest); 2115 sha1_mac_init_ctx(&sha1_hmac_ctx, digest, 2116 SHA1_DIGEST_LENGTH); 2117 } else { 2118 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data, 2119 keylen_in_bytes); 2120 } 2121 } 2122 2123 /* get the mechanism parameters, if applicable */ 2124 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) { 2125 if (mechanism->cm_param == NULL || 2126 mechanism->cm_param_len != sizeof (ulong_t)) { 2127 ret = CRYPTO_MECHANISM_PARAM_INVALID; 2128 goto bail; 2129 } 2130 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len); 2131 if (digest_len > SHA1_DIGEST_LENGTH) { 2132 ret = CRYPTO_MECHANISM_PARAM_INVALID; 2133 goto bail; 2134 } 2135 } 2136 2137 /* do a SHA1 update of the inner context using the specified data */ 2138 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret); 2139 if (ret != CRYPTO_SUCCESS) 2140 /* the update failed, free context and bail */ 2141 goto bail; 2142 2143 /* 2144 * Do a SHA1 final on the inner context. 2145 */ 2146 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext); 2147 2148 /* 2149 * Do an SHA1 update on the outer context, feeding the inner 2150 * digest as data. 
2151 */ 2152 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH); 2153 2154 /* 2155 * Do a SHA1 final on the outer context, storing the computed 2156 * digest in the users buffer. 2157 */ 2158 switch (mac->cd_format) { 2159 case CRYPTO_DATA_RAW: 2160 if (digest_len != SHA1_DIGEST_LENGTH) { 2161 /* 2162 * The caller requested a short digest. Digest 2163 * into a scratch buffer and return to 2164 * the user only what was requested. 2165 */ 2166 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext); 2167 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base + 2168 mac->cd_offset, digest_len); 2169 } else { 2170 SHA1Final((unsigned char *)mac->cd_raw.iov_base + 2171 mac->cd_offset, &sha1_hmac_ctx.hc_ocontext); 2172 } 2173 break; 2174 case CRYPTO_DATA_UIO: 2175 ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac, 2176 digest_len, digest); 2177 break; 2178 case CRYPTO_DATA_MBLK: 2179 ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac, 2180 digest_len, digest); 2181 break; 2182 default: 2183 ret = CRYPTO_ARGUMENTS_BAD; 2184 } 2185 2186 if (ret == CRYPTO_SUCCESS) { 2187 mac->cd_length = digest_len; 2188 } else { 2189 mac->cd_length = 0; 2190 } 2191 /* Extra paranoia: zeroize the context on the stack */ 2192 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t)); 2193 2194 return (ret); 2195bail: 2196 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t)); 2197 mac->cd_length = 0; 2198 return (ret); 2199} 2200 2201/* ARGSUSED */ 2202static int 2203sha1_mac_verify_atomic(crypto_provider_handle_t provider, 2204 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 2205 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, 2206 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 2207{ 2208 int ret = CRYPTO_SUCCESS; 2209 uchar_t digest[SHA1_DIGEST_LENGTH]; 2210 sha1_hmac_ctx_t sha1_hmac_ctx; 2211 uint32_t digest_len = SHA1_DIGEST_LENGTH; 2212 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); 2213 2214 if (mechanism->cm_type != 
SHA1_HMAC_MECH_INFO_TYPE && 2215 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE) 2216 return (CRYPTO_MECHANISM_INVALID); 2217 2218 /* Add support for key by attributes (RFE 4706552) */ 2219 if (key->ck_format != CRYPTO_KEY_RAW) 2220 return (CRYPTO_ARGUMENTS_BAD); 2221 2222 if (ctx_template != NULL) { 2223 /* reuse context template */ 2224 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t)); 2225 } else { 2226 /* no context template, initialize context */ 2227 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) { 2228 /* 2229 * Hash the passed-in key to get a smaller key. 2230 * The inner context is used since it hasn't been 2231 * initialized yet. 2232 */ 2233 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext, 2234 key->ck_data, keylen_in_bytes, digest); 2235 sha1_mac_init_ctx(&sha1_hmac_ctx, digest, 2236 SHA1_DIGEST_LENGTH); 2237 } else { 2238 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data, 2239 keylen_in_bytes); 2240 } 2241 } 2242 2243 /* get the mechanism parameters, if applicable */ 2244 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) { 2245 if (mechanism->cm_param == NULL || 2246 mechanism->cm_param_len != sizeof (ulong_t)) { 2247 ret = CRYPTO_MECHANISM_PARAM_INVALID; 2248 goto bail; 2249 } 2250 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len); 2251 if (digest_len > SHA1_DIGEST_LENGTH) { 2252 ret = CRYPTO_MECHANISM_PARAM_INVALID; 2253 goto bail; 2254 } 2255 } 2256 2257 if (mac->cd_length != digest_len) { 2258 ret = CRYPTO_INVALID_MAC; 2259 goto bail; 2260 } 2261 2262 /* do a SHA1 update of the inner context using the specified data */ 2263 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret); 2264 if (ret != CRYPTO_SUCCESS) 2265 /* the update failed, free context and bail */ 2266 goto bail; 2267 2268 /* do a SHA1 final on the inner context */ 2269 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext); 2270 2271 /* 2272 * Do an SHA1 update on the outer context, feeding the inner 2273 * digest as data. 
2274 */ 2275 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH); 2276 2277 /* 2278 * Do a SHA1 final on the outer context, storing the computed 2279 * digest in the users buffer. 2280 */ 2281 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext); 2282 2283 /* 2284 * Compare the computed digest against the expected digest passed 2285 * as argument. 2286 */ 2287 2288 switch (mac->cd_format) { 2289 2290 case CRYPTO_DATA_RAW: 2291 if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base + 2292 mac->cd_offset, digest_len) != 0) 2293 ret = CRYPTO_INVALID_MAC; 2294 break; 2295 2296 case CRYPTO_DATA_UIO: { 2297 off_t offset = mac->cd_offset; 2298 uint_t vec_idx; 2299 off_t scratch_offset = 0; 2300 size_t length = digest_len; 2301 size_t cur_len; 2302 2303 /* we support only kernel buffer */ 2304 if (mac->cd_uio->uio_segflg != UIO_SYSSPACE) 2305 return (CRYPTO_ARGUMENTS_BAD); 2306 2307 /* jump to the first iovec containing the expected digest */ 2308 for (vec_idx = 0; 2309 offset >= mac->cd_uio->uio_iov[vec_idx].iov_len && 2310 vec_idx < mac->cd_uio->uio_iovcnt; 2311 offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len); 2312 if (vec_idx == mac->cd_uio->uio_iovcnt) { 2313 /* 2314 * The caller specified an offset that is 2315 * larger than the total size of the buffers 2316 * it provided. 
2317 */ 2318 ret = CRYPTO_DATA_LEN_RANGE; 2319 break; 2320 } 2321 2322 /* do the comparison of computed digest vs specified one */ 2323 while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) { 2324 cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len - 2325 offset, length); 2326 2327 if (bcmp(digest + scratch_offset, 2328 mac->cd_uio->uio_iov[vec_idx].iov_base + offset, 2329 cur_len) != 0) { 2330 ret = CRYPTO_INVALID_MAC; 2331 break; 2332 } 2333 2334 length -= cur_len; 2335 vec_idx++; 2336 scratch_offset += cur_len; 2337 offset = 0; 2338 } 2339 break; 2340 } 2341 2342 case CRYPTO_DATA_MBLK: { 2343 off_t offset = mac->cd_offset; 2344 mblk_t *mp; 2345 off_t scratch_offset = 0; 2346 size_t length = digest_len; 2347 size_t cur_len; 2348 2349 /* jump to the first mblk_t containing the expected digest */ 2350 for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp); 2351 offset -= MBLKL(mp), mp = mp->b_cont); 2352 if (mp == NULL) { 2353 /* 2354 * The caller specified an offset that is larger than 2355 * the total size of the buffers it provided. 2356 */ 2357 ret = CRYPTO_DATA_LEN_RANGE; 2358 break; 2359 } 2360 2361 while (mp != NULL && length > 0) { 2362 cur_len = MIN(MBLKL(mp) - offset, length); 2363 if (bcmp(digest + scratch_offset, 2364 mp->b_rptr + offset, cur_len) != 0) { 2365 ret = CRYPTO_INVALID_MAC; 2366 break; 2367 } 2368 2369 length -= cur_len; 2370 mp = mp->b_cont; 2371 scratch_offset += cur_len; 2372 offset = 0; 2373 } 2374 break; 2375 } 2376 2377 default: 2378 ret = CRYPTO_ARGUMENTS_BAD; 2379 } 2380 2381 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t)); 2382 return (ret); 2383bail: 2384 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t)); 2385 mac->cd_length = 0; 2386 return (ret); 2387} 2388 2389/* 2390 * KCF software provider context management entry points. 
2391 */ 2392 2393/* ARGSUSED */ 2394static int 2395sha1_create_ctx_template(crypto_provider_handle_t provider, 2396 crypto_mechanism_t *mechanism, crypto_key_t *key, 2397 crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size, 2398 crypto_req_handle_t req) 2399{ 2400 sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl; 2401 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); 2402 2403 if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) && 2404 (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) { 2405 return (CRYPTO_MECHANISM_INVALID); 2406 } 2407 2408 /* Add support for key by attributes (RFE 4706552) */ 2409 if (key->ck_format != CRYPTO_KEY_RAW) 2410 return (CRYPTO_ARGUMENTS_BAD); 2411 2412 /* 2413 * Allocate and initialize SHA1 context. 2414 */ 2415 sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t), 2416 crypto_kmflag(req)); 2417 if (sha1_hmac_ctx_tmpl == NULL) 2418 return (CRYPTO_HOST_MEMORY); 2419 2420 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) { 2421 uchar_t digested_key[SHA1_DIGEST_LENGTH]; 2422 2423 /* 2424 * Hash the passed-in key to get a smaller key. 2425 * The inner context is used since it hasn't been 2426 * initialized yet. 
2427 */ 2428 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext, 2429 key->ck_data, keylen_in_bytes, digested_key); 2430 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key, 2431 SHA1_DIGEST_LENGTH); 2432 } else { 2433 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data, 2434 keylen_in_bytes); 2435 } 2436 2437 sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type; 2438 *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl; 2439 *ctx_template_size = sizeof (sha1_hmac_ctx_t); 2440 2441 2442 return (CRYPTO_SUCCESS); 2443} 2444 2445static int 2446sha1_free_context(crypto_ctx_t *ctx) 2447{ 2448 uint_t ctx_len; 2449 sha1_mech_type_t mech_type; 2450 2451 if (ctx->cc_provider_private == NULL) 2452 return (CRYPTO_SUCCESS); 2453 2454 /* 2455 * We have to free either SHA1 or SHA1-HMAC contexts, which 2456 * have different lengths. 2457 */ 2458 2459 mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type; 2460 if (mech_type == SHA1_MECH_INFO_TYPE) 2461 ctx_len = sizeof (sha1_ctx_t); 2462 else { 2463 ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE || 2464 mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE); 2465 ctx_len = sizeof (sha1_hmac_ctx_t); 2466 } 2467 2468 bzero(ctx->cc_provider_private, ctx_len); 2469 kmem_free(ctx->cc_provider_private, ctx_len); 2470 ctx->cc_provider_private = NULL; 2471 2472 return (CRYPTO_SUCCESS); 2473} 2474 2475#endif /* _KERNEL */ | |