xref: /illumos-gate/usr/src/common/crypto/sha1/sha1.c (revision 7c478bd9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * The basic framework for this code came from the reference
31  * implementation for MD5.  That implementation is Copyright (C)
32  * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
33  *
34  * License to copy and use this software is granted provided that it
35  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
36  * Algorithm" in all material mentioning or referencing this software
37  * or this function.
38  *
39  * License is also granted to make and use derivative works provided
40  * that such works are identified as "derived from the RSA Data
41  * Security, Inc. MD5 Message-Digest Algorithm" in all material
42  * mentioning or referencing the derived work.
43  *
44  * RSA Data Security, Inc. makes no representations concerning either
45  * the merchantability of this software or the suitability of this
46  * software for any particular purpose. It is provided "as is"
47  * without express or implied warranty of any kind.
48  *
49  * These notices must be retained in any copies of any part of this
50  * documentation and/or software.
51  *
 * NOTE: Cleaned-up and optimized version of SHA1, based on the FIPS 180-1
53  * standard, available at http://www.itl.nist.gov/div897/pubs/fip180-1.htm
54  * Not as fast as one would like -- further optimizations are encouraged
55  * and appreciated.
56  */
57 
58 #include <sys/types.h>
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysmacros.h>
62 #include <sys/sha1.h>
63 #include <sys/sha1_consts.h>
64 
65 #ifdef _KERNEL
66 
67 #include <sys/modctl.h>
68 #include <sys/cmn_err.h>
69 #include <sys/note.h>
70 #include <sys/crypto/common.h>
71 #include <sys/crypto/spi.h>
72 #include <sys/strsun.h>
73 
74 /*
75  * The sha1 module is created with two modlinkages:
76  * - a modlmisc that allows consumers to directly call the entry points
77  *   SHA1Init, SHA1Update, and SHA1Final.
78  * - a modlcrypto that allows the module to register with the Kernel
79  *   Cryptographic Framework (KCF) as a software provider for the SHA1
80  *   mechanisms.
81  */
82 
83 #endif /* _KERNEL */
84 #ifndef	_KERNEL
85 #include <strings.h>
86 #include <stdlib.h>
87 #include <errno.h>
88 #include <sys/systeminfo.h>
89 #endif	/* !_KERNEL */
90 
91 static void Encode(uint8_t *, uint32_t *, size_t);
92 static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
93     SHA1_CTX *, const uint8_t *);
94 
95 static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
96 
/*
 * F, G, and H are the basic SHA1 functions (FIPS 180-1):
 *   F is "Ch" (choose):    used in rounds  0-19
 *   G is "Parity":         used in rounds 20-39 and 60-79
 *   H is "Maj" (majority): used in rounds 40-59
 *
 * Every argument use is parenthesized — including the operand of the
 * bitwise-not in F — so the macros expand safely for arbitrary
 * expression arguments.
 */
#define	F(b, c, d)	(((b) & (c)) | ((~(b)) & (d)))
#define	G(b, c, d)	((b) ^ (c) ^ (d))
#define	H(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))

/*
 * ROTATE_LEFT rotates x left n bits.  n must be in [1, width-1];
 * a shift by the full width of x would be undefined behavior.
 */
#define	ROTATE_LEFT(x, n)	\
	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
109 
110 #ifdef _KERNEL
111 
112 static struct modlmisc modlmisc = {
113 	&mod_miscops,
114 	"SHA1 Message-Digest Algorithm"
115 };
116 
117 static struct modlcrypto modlcrypto = {
118 	&mod_cryptoops,
119 	"SHA1 Kernel SW Provider %I%"
120 };
121 
122 static struct modlinkage modlinkage = {
123 	MODREV_1, &modlmisc, &modlcrypto, NULL
124 };
125 
126 /*
127  * CSPI information (entry points, provider info, etc.)
128  */
129 
130 typedef enum sha1_mech_type {
131 	SHA1_MECH_INFO_TYPE,		/* SUN_CKM_SHA1 */
132 	SHA1_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_SHA1_HMAC */
133 	SHA1_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_SHA1_HMAC_GENERAL */
134 } sha1_mech_type_t;
135 
136 #define	SHA1_DIGEST_LENGTH	20	/* SHA1 digest length in bytes */
137 #define	SHA1_HMAC_BLOCK_SIZE	64	/* SHA1-HMAC block size */
138 #define	SHA1_HMAC_MIN_KEY_LEN	8	/* SHA1-HMAC min key length in bits */
139 #define	SHA1_HMAC_MAX_KEY_LEN	INT_MAX /* SHA1-HMAC max key length in bits */
140 #define	SHA1_HMAC_INTS_PER_BLOCK	(SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
141 
142 /*
143  * Context for SHA1 mechanism.
144  */
145 typedef struct sha1_ctx {
146 	sha1_mech_type_t	sc_mech_type;	/* type of context */
147 	SHA1_CTX		sc_sha1_ctx;	/* SHA1 context */
148 } sha1_ctx_t;
149 
150 /*
151  * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
152  */
153 typedef struct sha1_hmac_ctx {
154 	sha1_mech_type_t	hc_mech_type;	/* type of context */
155 	uint32_t		hc_digest_len;	/* digest len in bytes */
156 	SHA1_CTX		hc_icontext;	/* inner SHA1 context */
157 	SHA1_CTX		hc_ocontext;	/* outer SHA1 context */
158 } sha1_hmac_ctx_t;
159 
160 /*
161  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
162  * by KCF to one of the entry points.
163  */
164 
#define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
#define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)

/*
 * Extract the digest length passed as mechanism parameter.  The
 * parameter may not be suitably aligned for a direct ulong_t load,
 * in which case it is copied out with bcopy() first.
 *
 * Note: the aligned branch previously dereferenced the caller's
 * variable name `mechanism' instead of the macro parameter (m),
 * which only compiled when the macro happened to be invoked with a
 * variable of that exact name.  It now uses (m) consistently.  Both
 * macros are wrapped in do { } while (0) so they behave as single
 * statements in if/else bodies.
 */
#define	PROV_SHA1_GET_DIGEST_LEN(m, len) do {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
	else {								\
		ulong_t tmp_ulong;					\
		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
} while (0)

/* digest `key' (len bytes) into `digest' using the scratch context ctx */
#define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) do {	\
	SHA1Init(ctx);						\
	SHA1Update(ctx, key, len);				\
	SHA1Final(digest, ctx);					\
} while (0)
184 
185 /*
186  * Mechanism info structure passed to KCF during registration.
187  */
188 static crypto_mech_info_t sha1_mech_info_tab[] = {
189 	/* SHA1 */
190 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
191 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
192 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
193 	/* SHA1-HMAC */
194 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
195 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
196 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
197 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
198 	/* SHA1-HMAC GENERAL */
199 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
200 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
201 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
202 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
203 };
204 
205 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
206 
207 static crypto_control_ops_t sha1_control_ops = {
208 	sha1_provider_status
209 };
210 
211 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
212     crypto_req_handle_t);
213 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
214     crypto_req_handle_t);
215 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
216     crypto_req_handle_t);
217 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
218     crypto_req_handle_t);
219 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
220     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
221     crypto_req_handle_t);
222 
223 static crypto_digest_ops_t sha1_digest_ops = {
224 	sha1_digest_init,
225 	sha1_digest,
226 	sha1_digest_update,
227 	NULL,
228 	sha1_digest_final,
229 	sha1_digest_atomic
230 };
231 
232 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
233     crypto_spi_ctx_template_t, crypto_req_handle_t);
234 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
235     crypto_req_handle_t);
236 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
237 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
238     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
239     crypto_spi_ctx_template_t, crypto_req_handle_t);
240 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
241     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
242     crypto_spi_ctx_template_t, crypto_req_handle_t);
243 
244 static crypto_mac_ops_t sha1_mac_ops = {
245 	sha1_mac_init,
246 	NULL,
247 	sha1_mac_update,
248 	sha1_mac_final,
249 	sha1_mac_atomic,
250 	sha1_mac_verify_atomic
251 };
252 
253 static int sha1_create_ctx_template(crypto_provider_handle_t,
254     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
255     size_t *, crypto_req_handle_t);
256 static int sha1_free_context(crypto_ctx_t *);
257 
258 static crypto_ctx_ops_t sha1_ctx_ops = {
259 	sha1_create_ctx_template,
260 	sha1_free_context
261 };
262 
263 static crypto_ops_t sha1_crypto_ops = {
264 	&sha1_control_ops,
265 	&sha1_digest_ops,
266 	NULL,
267 	&sha1_mac_ops,
268 	NULL,
269 	NULL,
270 	NULL,
271 	NULL,
272 	NULL,
273 	NULL,
274 	NULL,
275 	NULL,
276 	NULL,
277 	&sha1_ctx_ops
278 };
279 
280 static crypto_provider_info_t sha1_prov_info = {
281 	CRYPTO_SPI_VERSION_1,
282 	"SHA1 Software Provider",
283 	CRYPTO_SW_PROVIDER,
284 	{&modlinkage},
285 	NULL,
286 	&sha1_crypto_ops,
287 	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
288 	sha1_mech_info_tab
289 };
290 
291 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
292 
293 int
294 _init()
295 {
296 	int ret;
297 
298 	if ((ret = mod_install(&modlinkage)) != 0)
299 		return (ret);
300 
301 	/*
302 	 * Register with KCF. If the registration fails, log an
303 	 * error but do not uninstall the module, since the functionality
304 	 * provided by misc/sha1 should still be available.
305 	 */
306 	if ((ret = crypto_register_provider(&sha1_prov_info,
307 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
308 		cmn_err(CE_WARN, "sha1 _init: "
309 		    "crypto_register_provider() failed (0x%x)", ret);
310 
311 	return (0);
312 }
313 
/*
 * _info()
 *
 * Kernel module entry point: reports module information through the
 * two-element modlinkage (misc + crypto) declared in this file.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
319 
320 #endif /* _KERNEL */
321 
322 /*
323  * SHA1Init()
324  *
325  * purpose: initializes the sha1 context and begins and sha1 digest operation
326  *   input: SHA1_CTX *	: the context to initializes.
327  *  output: void
328  */
329 
330 void
331 SHA1Init(SHA1_CTX *ctx)
332 {
333 	ctx->count[0] = ctx->count[1] = 0;
334 
335 	/*
336 	 * load magic initialization constants. Tell lint
337 	 * that these constants are unsigned by using U.
338 	 */
339 
340 	ctx->state[0] = 0x67452301U;
341 	ctx->state[1] = 0xefcdab89U;
342 	ctx->state[2] = 0x98badcfeU;
343 	ctx->state[3] = 0x10325476U;
344 	ctx->state[4] = 0xc3d2e1f0U;
345 }
346 
347 #ifdef VIS_SHA1
348 
349 static int usevis = 0;
350 
351 #ifdef _KERNEL
352 
353 #include <sys/regset.h>
354 #include <sys/vis.h>
355 
356 /* the alignment for block stores to save fp registers */
357 #define	VIS_ALIGN	(64)
358 
359 extern int sha1_savefp(kfpu_t *, int);
360 extern void sha1_restorefp(kfpu_t *);
361 
362 uint32_t	vis_sha1_svfp_threshold = 128;
363 
364 #else /* !_KERNEL */
365 
366 static boolean_t checked_vis = B_FALSE;
367 
368 static int
369 havevis()
370 {
371 	char *buf = NULL;
372 	char *isa_token;
373 	char *lasts;
374 	int ret = 0;
375 	size_t bufsize = 255; /* UltraSPARC III needs 115 chars */
376 	int v9_isa_token, vis_isa_token, isa_token_num;
377 
378 	if (checked_vis) {
379 		return (usevis);
380 	}
381 
382 	if ((buf = malloc(bufsize)) == NULL) {
383 		return (0);
384 	}
385 
386 	if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
387 		free(buf);
388 		return (0);
389 	} else if (ret > bufsize) {
390 		/* We lost some because our buffer was too small  */
391 		if ((buf = realloc(buf, bufsize = ret)) == NULL) {
392 			return (0);
393 		}
394 		if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
395 			free(buf);
396 			return (0);
397 		}
398 	}
399 
400 	/*
401 	 * Check the relative posistions of sparcv9 & sparcv9+vis
402 	 * because they are listed in (best) performance order.
403 	 * For example: The Niagara chip reports it has VIS but the
404 	 * SHA1 code runs faster without this optimisation.
405 	 */
406 	isa_token = strtok_r(buf, " ", &lasts);
407 	v9_isa_token = vis_isa_token = -1;
408 	isa_token_num = 0;
409 	do {
410 		if (strcmp(isa_token, "sparcv9") == 0) {
411 			v9_isa_token = isa_token_num;
412 		} else if (strcmp(isa_token, "sparcv9+vis") == 0) {
413 			vis_isa_token = isa_token_num;
414 		}
415 		isa_token_num++;
416 	} while (isa_token = strtok_r(NULL, " ", &lasts));
417 
418 	if (vis_isa_token != -1 && vis_isa_token < v9_isa_token)
419 		usevis = 1;
420 	free(buf);
421 
422 	checked_vis = B_TRUE;
423 	return (usevis);
424 }
425 
426 #endif /* _KERNEL */
427 
428 /*
429  * VIS SHA-1 consts.
430  */
431 static uint64_t VIS[] = {
432 	0x8000000080000000,
433 	0x0002000200020002,
434 	0x5a8279996ed9eba1,
435 	0x8f1bbcdcca62c1d6,
436 	0x012389ab456789ab};
437 
438 extern void SHA1TransformVIS(uint64_t *, uint64_t *, uint32_t *, uint64_t *);
439 
440 
/*
 * SHA1Update()
 *
 * purpose: continues an sha1 digest operation, using the message block
 *          to update the context.
 *   input: SHA1_CTX *	: the context to update
 *          uint8_t *	: the message block
 *          uint32_t    : the length of the message block in bytes
 *  output: void
 */

void
SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
{
	uint32_t i, buf_index, buf_len;

	/* X0: VIS message-schedule scratch; input64: aligned bounce buffer */
	uint64_t X0[40], input64[8];

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 (count[1] holds the low 32 bits) */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits, carrying into count[0] on wrap */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
#ifdef _KERNEL
		/*
		 * Raw save area, over-sized so it can be rounded up by
		 * hand to the 64-byte boundary required for the block
		 * stores that save the fp registers.
		 */
		uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN];
		kfpu_t *fpu;

		uint32_t len = (input_len + buf_index) & ~0x3f;
		int svfp_ok;

		fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64);
		/*
		 * Only save/restore the fp state when enough whole
		 * blocks will be processed to amortize the cost.
		 */
		svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0);
		usevis = sha1_savefp(fpu, svfp_ok);
#else
		if (!checked_vis)
			usevis = havevis();
#endif /* _KERNEL */

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (usevis) {
				SHA1TransformVIS(X0,
				    (uint64_t *)ctx->buf_un.buf8,
				    &ctx->state[0], VIS);
			} else {
				SHA1Transform(ctx->state[0], ctx->state[1],
				    ctx->state[2], ctx->state[3],
				    ctx->state[4], ctx, ctx->buf_un.buf8);
			}
			i = buf_len;
		}

		/*
		 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate
		 * SHA-1 processing. This is achieved by "offloading" the
		 * computation of the message schedule (MS) to the VIS units.
		 * This allows the VIS computation of the message schedule
		 * to be performed in parallel with the standard integer
		 * processing of the remainder of the SHA-1 computation,
		 * improving performance by up to around 1.37X compared to
		 * an optimized integer-only implementation.
		 *
		 * The VIS implementation of SHA1Transform has a different API
		 * to the standard integer version:
		 *
		 * void SHA1TransformVIS(
		 *	 uint64_t *, // Pointer to MS for ith block
		 *	 uint64_t *, // Pointer to ith block of message data
		 *	 uint32_t *, // Pointer to SHA state i.e ctx->state
		 *	 uint64_t *, // Pointer to various VIS constants
		 * )
		 *
		 * Note: the message data must be 4-byte aligned.
		 *
		 * Function requires VIS 1.0 support.
		 *
		 * Handling is provided to deal with arbitrary byte alignment
		 * of the input data but the performance gains are reduced
		 * for alignments other than 4-bytes.
		 */
		if (usevis) {
			if (((uint64_t)(&input[i]) & 0x3)) {
				/*
				 * Main processing loop - input misaligned:
				 * bounce each 64-byte block through the
				 * aligned input64[] buffer first.
				 */
				for (; i + 63 < input_len; i += 64) {
				    bcopy(&input[i], input64, 64);
				    SHA1TransformVIS(X0, input64,
					&ctx->state[0], VIS);
				}
			} else {
				/*
				 * Main processing loop - input at least
				 * 4-byte aligned (the guard above checks
				 * the low two address bits), so the blocks
				 * can be fed to the VIS code in place.
				 */
				for (; i + 63 < input_len; i += 64) {
					SHA1TransformVIS(X0,
					    (uint64_t *)&input[i],
					    &ctx->state[0], VIS);
				}

			}
#ifdef _KERNEL
			sha1_restorefp(fpu);
#endif /* _KERNEL */
		} else {
			/* integer-only fallback path */
			for (; i + 63 < input_len; i += 64) {
			    SHA1Transform(ctx->state[0], ctx->state[1],
				ctx->state[2], ctx->state[3], ctx->state[4],
				ctx, &input[i]);
			}
		}

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
593 
594 #else /* VIS_SHA1 */
595 
596 void
597 SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
598 {
599 	uint32_t i, buf_index, buf_len;
600 
601 	/* check for noop */
602 	if (input_len == 0)
603 		return;
604 
605 	/* compute number of bytes mod 64 */
606 	buf_index = (ctx->count[1] >> 3) & 0x3F;
607 
608 	/* update number of bits */
609 	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
610 		ctx->count[0]++;
611 
612 	ctx->count[0] += (input_len >> 29);
613 
614 	buf_len = 64 - buf_index;
615 
616 	/* transform as many times as possible */
617 	i = 0;
618 	if (input_len >= buf_len) {
619 
620 		/*
621 		 * general optimization:
622 		 *
623 		 * only do initial bcopy() and SHA1Transform() if
624 		 * buf_index != 0.  if buf_index == 0, we're just
625 		 * wasting our time doing the bcopy() since there
626 		 * wasn't any data left over from a previous call to
627 		 * SHA1Update().
628 		 */
629 
630 		if (buf_index) {
631 			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
632 
633 
634 			SHA1Transform(ctx->state[0], ctx->state[1],
635 			    ctx->state[2], ctx->state[3], ctx->state[4], ctx,
636 			    ctx->buf_un.buf8);
637 
638 			i = buf_len;
639 		}
640 
641 		for (; i + 63 < input_len; i += 64)
642 			SHA1Transform(ctx->state[0], ctx->state[1],
643 			    ctx->state[2], ctx->state[3], ctx->state[4],
644 			    ctx, &input[i]);
645 
646 		/*
647 		 * general optimization:
648 		 *
649 		 * if i and input_len are the same, return now instead
650 		 * of calling bcopy(), since the bcopy() in this case
651 		 * will be an expensive nop.
652 		 */
653 
654 		if (input_len == i)
655 			return;
656 
657 		buf_index = 0;
658 	}
659 
660 	/* buffer remaining input */
661 	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
662 }
663 
664 #endif /* VIS_SHA1 */
665 
666 /*
667  * SHA1Final()
668  *
669  * purpose: ends an sha1 digest operation, finalizing the message digest and
670  *          zeroing the context.
671  *   input: uint8_t *	: a buffer to store the digest in
672  *          SHA1_CTX *  : the context to finalize, save, and zero
673  *  output: void
674  */
675 
676 void
677 SHA1Final(uint8_t *digest, SHA1_CTX *ctx)
678 {
679 	uint8_t		bitcount_be[sizeof (ctx->count)];
680 	uint32_t	index = (ctx->count[1] >> 3) & 0x3f;
681 
682 	/* store bit count, big endian */
683 	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));
684 
685 	/* pad out to 56 mod 64 */
686 	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
687 
688 	/* append length (before padding) */
689 	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));
690 
691 	/* store state in digest */
692 	Encode(digest, ctx->state, sizeof (ctx->state));
693 }
694 
695 /*
696  * sparc optimization:
697  *
698  * on the sparc, we can load big endian 32-bit data easily.  note that
699  * special care must be taken to ensure the address is 32-bit aligned.
700  * in the interest of speed, we don't check to make sure, since
701  * careful programming can guarantee this for us.
702  */
703 
#if	defined(_BIG_ENDIAN)

#define	LOAD_BIG_32(addr)	(*(uint32_t *)(addr))

#else	/* little endian -- will work on big endian, but slowly */

/*
 * Each byte is widened to uint32_t before shifting: without the cast,
 * a byte value >= 0x80 would be promoted to (signed) int and shifting
 * it left by 24 would overflow into the sign bit, which is undefined
 * behavior in C.
 */
#define	LOAD_BIG_32(addr)	\
	(((uint32_t)(addr)[0] << 24) | ((uint32_t)(addr)[1] << 16) |	\
	((uint32_t)(addr)[2] << 8) | (uint32_t)(addr)[3])
#endif
713 
714 /*
715  * sparc register window optimization:
716  *
717  * `a', `b', `c', `d', and `e' are passed into SHA1Transform
718  * explicitly since it increases the number of registers available to
719  * the compiler.  under this scheme, these variables can be held in
720  * %i0 - %i4, which leaves more local and out registers available.
721  */
722 
723 /*
724  * SHA1Transform()
725  *
726  * purpose: sha1 transformation -- updates the digest based on `block'
727  *   input: uint32_t	: bytes  1 -  4 of the digest
728  *          uint32_t	: bytes  5 -  8 of the digest
729  *          uint32_t	: bytes  9 - 12 of the digest
 *          uint32_t	: bytes 13 - 16 of the digest
 *          uint32_t	: bytes 17 - 20 of the digest
732  *          SHA1_CTX *	: the context to update
733  *          uint8_t [64]: the block to use to update the digest
734  *  output: void
735  */
736 
737 void
738 SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
739     SHA1_CTX *ctx, const uint8_t blk[64])
740 {
741 	/*
742 	 * sparc optimization:
743 	 *
744 	 * while it is somewhat counter-intuitive, on sparc, it is
745 	 * more efficient to place all the constants used in this
746 	 * function in an array and load the values out of the array
747 	 * than to manually load the constants.  this is because
748 	 * setting a register to a 32-bit value takes two ops in most
749 	 * cases: a `sethi' and an `or', but loading a 32-bit value
750 	 * from memory only takes one `ld' (or `lduw' on v9).  while
751 	 * this increases memory usage, the compiler can find enough
	 * other things to do while waiting so that the pipeline does
753 	 * not stall.  additionally, it is likely that many of these
754 	 * constants are cached so that later accesses do not even go
755 	 * out to the bus.
756 	 *
757 	 * this array is declared `static' to keep the compiler from
758 	 * having to bcopy() this array onto the stack frame of
759 	 * SHA1Transform() each time it is called -- which is
760 	 * unacceptably expensive.
761 	 *
762 	 * the `const' is to ensure that callers are good citizens and
763 	 * do not try to munge the array.  since these routines are
764 	 * going to be called from inside multithreaded kernelland,
765 	 * this is a good safety check. -- `sha1_consts' will end up in
766 	 * .rodata.
767 	 *
768 	 * unfortunately, loading from an array in this manner hurts
769 	 * performance under intel.  so, there is a macro,
770 	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
771 	 * a reference to this array, or to the actual constant,
772 	 * depending on what platform this code is compiled for.
773 	 */
774 
775 #if	defined(__sparc)
776 	static const uint32_t sha1_consts[] = {
777 		SHA1_CONST_0,	SHA1_CONST_1,	SHA1_CONST_2,	SHA1_CONST_3,
778 	};
779 #endif
780 
781 	/*
782 	 * general optimization:
783 	 *
784 	 * use individual integers instead of using an array.  this is a
785 	 * win, although the amount it wins by seems to vary quite a bit.
786 	 */
787 
788 	uint32_t	w_0, w_1, w_2,  w_3,  w_4,  w_5,  w_6,  w_7;
789 	uint32_t	w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
790 
791 	/*
792 	 * sparc optimization:
793 	 *
794 	 * if `block' is already aligned on a 4-byte boundary, use
795 	 * LOAD_BIG_32() directly.  otherwise, bcopy() into a
796 	 * buffer that *is* aligned on a 4-byte boundary and then do
797 	 * the LOAD_BIG_32() on that buffer.  benchmarks have shown
798 	 * that using the bcopy() is better than loading the bytes
799 	 * individually and doing the endian-swap by hand.
800 	 *
801 	 * even though it's quite tempting to assign to do:
802 	 *
803 	 * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
804 	 *
805 	 * and only have one set of LOAD_BIG_32()'s, the compiler
806 	 * *does not* like that, so please resist the urge.
807 	 */
808 
809 #if	defined(__sparc)
810 	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
811 		bcopy(blk, ctx->buf_un.buf32,  sizeof (ctx->buf_un.buf32));
812 		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
813 		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
814 		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
815 		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
816 		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
817 		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
818 		w_9  = LOAD_BIG_32(ctx->buf_un.buf32 +  9);
819 		w_8  = LOAD_BIG_32(ctx->buf_un.buf32 +  8);
820 		w_7  = LOAD_BIG_32(ctx->buf_un.buf32 +  7);
821 		w_6  = LOAD_BIG_32(ctx->buf_un.buf32 +  6);
822 		w_5  = LOAD_BIG_32(ctx->buf_un.buf32 +  5);
823 		w_4  = LOAD_BIG_32(ctx->buf_un.buf32 +  4);
824 		w_3  = LOAD_BIG_32(ctx->buf_un.buf32 +  3);
825 		w_2  = LOAD_BIG_32(ctx->buf_un.buf32 +  2);
826 		w_1  = LOAD_BIG_32(ctx->buf_un.buf32 +  1);
827 		w_0  = LOAD_BIG_32(ctx->buf_un.buf32 +  0);
828 	} else {
829 		/*LINTED*/
830 		w_15 = LOAD_BIG_32(blk + 60);
831 		/*LINTED*/
832 		w_14 = LOAD_BIG_32(blk + 56);
833 		/*LINTED*/
834 		w_13 = LOAD_BIG_32(blk + 52);
835 		/*LINTED*/
836 		w_12 = LOAD_BIG_32(blk + 48);
837 		/*LINTED*/
838 		w_11 = LOAD_BIG_32(blk + 44);
839 		/*LINTED*/
840 		w_10 = LOAD_BIG_32(blk + 40);
841 		/*LINTED*/
842 		w_9  = LOAD_BIG_32(blk + 36);
843 		/*LINTED*/
844 		w_8  = LOAD_BIG_32(blk + 32);
845 		/*LINTED*/
846 		w_7  = LOAD_BIG_32(blk + 28);
847 		/*LINTED*/
848 		w_6  = LOAD_BIG_32(blk + 24);
849 		/*LINTED*/
850 		w_5  = LOAD_BIG_32(blk + 20);
851 		/*LINTED*/
852 		w_4  = LOAD_BIG_32(blk + 16);
853 		/*LINTED*/
854 		w_3  = LOAD_BIG_32(blk + 12);
855 		/*LINTED*/
856 		w_2  = LOAD_BIG_32(blk +  8);
857 		/*LINTED*/
858 		w_1  = LOAD_BIG_32(blk +  4);
859 		/*LINTED*/
860 		w_0  = LOAD_BIG_32(blk +  0);
861 	}
862 #else
863 	w_15 = LOAD_BIG_32(blk + 60);
864 	w_14 = LOAD_BIG_32(blk + 56);
865 	w_13 = LOAD_BIG_32(blk + 52);
866 	w_12 = LOAD_BIG_32(blk + 48);
867 	w_11 = LOAD_BIG_32(blk + 44);
868 	w_10 = LOAD_BIG_32(blk + 40);
869 	w_9  = LOAD_BIG_32(blk + 36);
870 	w_8  = LOAD_BIG_32(blk + 32);
871 	w_7  = LOAD_BIG_32(blk + 28);
872 	w_6  = LOAD_BIG_32(blk + 24);
873 	w_5  = LOAD_BIG_32(blk + 20);
874 	w_4  = LOAD_BIG_32(blk + 16);
875 	w_3  = LOAD_BIG_32(blk + 12);
876 	w_2  = LOAD_BIG_32(blk +  8);
877 	w_1  = LOAD_BIG_32(blk +  4);
878 	w_0  = LOAD_BIG_32(blk +  0);
879 #endif
880 	/*
881 	 * general optimization:
882 	 *
883 	 * even though this approach is described in the standard as
884 	 * being slower algorithmically, it is 30-40% faster than the
885 	 * "faster" version under SPARC, because this version has more
886 	 * of the constraints specified at compile-time and uses fewer
887 	 * variables (and therefore has better register utilization)
888 	 * than its "speedier" brother.  (i've tried both, trust me)
889 	 *
890 	 * for either method given in the spec, there is an "assignment"
891 	 * phase where the following takes place:
892 	 *
893 	 *	tmp = (main_computation);
894 	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
895 	 *
896 	 * we can make the algorithm go faster by not doing this work,
897 	 * but just pretending that `d' is now `e', etc. this works
898 	 * really well and obviates the need for a temporary variable.
	 * however, we still explicitly perform the rotate action,
900 	 * since it is cheaper on SPARC to do it once than to have to
901 	 * do it over and over again.
902 	 */
903 
904 	/* round 1 */
905 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_0 + SHA1_CONST(0); /* 0 */
906 	b = ROTATE_LEFT(b, 30);
907 
908 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_1 + SHA1_CONST(0); /* 1 */
909 	a = ROTATE_LEFT(a, 30);
910 
911 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_2 + SHA1_CONST(0); /* 2 */
912 	e = ROTATE_LEFT(e, 30);
913 
914 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_3 + SHA1_CONST(0); /* 3 */
915 	d = ROTATE_LEFT(d, 30);
916 
917 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_4 + SHA1_CONST(0); /* 4 */
918 	c = ROTATE_LEFT(c, 30);
919 
920 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_5 + SHA1_CONST(0); /* 5 */
921 	b = ROTATE_LEFT(b, 30);
922 
923 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_6 + SHA1_CONST(0); /* 6 */
924 	a = ROTATE_LEFT(a, 30);
925 
926 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_7 + SHA1_CONST(0); /* 7 */
927 	e = ROTATE_LEFT(e, 30);
928 
929 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_8 + SHA1_CONST(0); /* 8 */
930 	d = ROTATE_LEFT(d, 30);
931 
932 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_9 + SHA1_CONST(0); /* 9 */
933 	c = ROTATE_LEFT(c, 30);
934 
935 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_10 + SHA1_CONST(0); /* 10 */
936 	b = ROTATE_LEFT(b, 30);
937 
938 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_11 + SHA1_CONST(0); /* 11 */
939 	a = ROTATE_LEFT(a, 30);
940 
941 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_12 + SHA1_CONST(0); /* 12 */
942 	e = ROTATE_LEFT(e, 30);
943 
944 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_13 + SHA1_CONST(0); /* 13 */
945 	d = ROTATE_LEFT(d, 30);
946 
947 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_14 + SHA1_CONST(0); /* 14 */
948 	c = ROTATE_LEFT(c, 30);
949 
950 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_15 + SHA1_CONST(0); /* 15 */
951 	b = ROTATE_LEFT(b, 30);
952 
953 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 16 */
954 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_0 + SHA1_CONST(0);
955 	a = ROTATE_LEFT(a, 30);
956 
957 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 17 */
958 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_1 + SHA1_CONST(0);
959 	e = ROTATE_LEFT(e, 30);
960 
961 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 18 */
962 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_2 + SHA1_CONST(0);
963 	d = ROTATE_LEFT(d, 30);
964 
965 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 19 */
966 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_3 + SHA1_CONST(0);
967 	c = ROTATE_LEFT(c, 30);
968 
969 	/* round 2 */
970 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 20 */
971 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_4 + SHA1_CONST(1);
972 	b = ROTATE_LEFT(b, 30);
973 
974 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 21 */
975 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_5 + SHA1_CONST(1);
976 	a = ROTATE_LEFT(a, 30);
977 
978 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 22 */
979 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_6 + SHA1_CONST(1);
980 	e = ROTATE_LEFT(e, 30);
981 
982 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 23 */
983 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_7 + SHA1_CONST(1);
984 	d = ROTATE_LEFT(d, 30);
985 
986 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 24 */
987 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_8 + SHA1_CONST(1);
988 	c = ROTATE_LEFT(c, 30);
989 
990 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 25 */
991 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_9 + SHA1_CONST(1);
992 	b = ROTATE_LEFT(b, 30);
993 
994 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 26 */
995 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_10 + SHA1_CONST(1);
996 	a = ROTATE_LEFT(a, 30);
997 
998 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 27 */
999 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_11 + SHA1_CONST(1);
1000 	e = ROTATE_LEFT(e, 30);
1001 
1002 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 28 */
1003 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_12 + SHA1_CONST(1);
1004 	d = ROTATE_LEFT(d, 30);
1005 
1006 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 29 */
1007 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_13 + SHA1_CONST(1);
1008 	c = ROTATE_LEFT(c, 30);
1009 
1010 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 30 */
1011 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_14 + SHA1_CONST(1);
1012 	b = ROTATE_LEFT(b, 30);
1013 
1014 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 31 */
1015 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_15 + SHA1_CONST(1);
1016 	a = ROTATE_LEFT(a, 30);
1017 
1018 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 32 */
1019 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_0 + SHA1_CONST(1);
1020 	e = ROTATE_LEFT(e, 30);
1021 
1022 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 33 */
1023 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_1 + SHA1_CONST(1);
1024 	d = ROTATE_LEFT(d, 30);
1025 
1026 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 34 */
1027 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_2 + SHA1_CONST(1);
1028 	c = ROTATE_LEFT(c, 30);
1029 
1030 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 35 */
1031 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_3 + SHA1_CONST(1);
1032 	b = ROTATE_LEFT(b, 30);
1033 
1034 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 36 */
1035 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_4 + SHA1_CONST(1);
1036 	a = ROTATE_LEFT(a, 30);
1037 
1038 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 37 */
1039 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_5 + SHA1_CONST(1);
1040 	e = ROTATE_LEFT(e, 30);
1041 
1042 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 38 */
1043 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_6 + SHA1_CONST(1);
1044 	d = ROTATE_LEFT(d, 30);
1045 
1046 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 39 */
1047 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_7 + SHA1_CONST(1);
1048 	c = ROTATE_LEFT(c, 30);
1049 
1050 	/* round 3 */
1051 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 40 */
1052 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_8 + SHA1_CONST(2);
1053 	b = ROTATE_LEFT(b, 30);
1054 
1055 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 41 */
1056 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_9 + SHA1_CONST(2);
1057 	a = ROTATE_LEFT(a, 30);
1058 
1059 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 42 */
1060 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_10 + SHA1_CONST(2);
1061 	e = ROTATE_LEFT(e, 30);
1062 
1063 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 43 */
1064 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_11 + SHA1_CONST(2);
1065 	d = ROTATE_LEFT(d, 30);
1066 
1067 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 44 */
1068 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_12 + SHA1_CONST(2);
1069 	c = ROTATE_LEFT(c, 30);
1070 
1071 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 45 */
1072 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_13 + SHA1_CONST(2);
1073 	b = ROTATE_LEFT(b, 30);
1074 
1075 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 46 */
1076 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_14 + SHA1_CONST(2);
1077 	a = ROTATE_LEFT(a, 30);
1078 
1079 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 47 */
1080 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_15 + SHA1_CONST(2);
1081 	e = ROTATE_LEFT(e, 30);
1082 
1083 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 48 */
1084 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_0 + SHA1_CONST(2);
1085 	d = ROTATE_LEFT(d, 30);
1086 
1087 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 49 */
1088 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_1 + SHA1_CONST(2);
1089 	c = ROTATE_LEFT(c, 30);
1090 
1091 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 50 */
1092 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_2 + SHA1_CONST(2);
1093 	b = ROTATE_LEFT(b, 30);
1094 
1095 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 51 */
1096 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_3 + SHA1_CONST(2);
1097 	a = ROTATE_LEFT(a, 30);
1098 
1099 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 52 */
1100 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_4 + SHA1_CONST(2);
1101 	e = ROTATE_LEFT(e, 30);
1102 
1103 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 53 */
1104 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_5 + SHA1_CONST(2);
1105 	d = ROTATE_LEFT(d, 30);
1106 
1107 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 54 */
1108 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_6 + SHA1_CONST(2);
1109 	c = ROTATE_LEFT(c, 30);
1110 
1111 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 55 */
1112 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_7 + SHA1_CONST(2);
1113 	b = ROTATE_LEFT(b, 30);
1114 
1115 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 56 */
1116 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_8 + SHA1_CONST(2);
1117 	a = ROTATE_LEFT(a, 30);
1118 
1119 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 57 */
1120 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_9 + SHA1_CONST(2);
1121 	e = ROTATE_LEFT(e, 30);
1122 
1123 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 58 */
1124 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_10 + SHA1_CONST(2);
1125 	d = ROTATE_LEFT(d, 30);
1126 
1127 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 59 */
1128 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_11 + SHA1_CONST(2);
1129 	c = ROTATE_LEFT(c, 30);
1130 
1131 	/* round 4 */
1132 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 60 */
1133 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_12 + SHA1_CONST(3);
1134 	b = ROTATE_LEFT(b, 30);
1135 
1136 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 61 */
1137 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_13 + SHA1_CONST(3);
1138 	a = ROTATE_LEFT(a, 30);
1139 
1140 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 62 */
1141 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_14 + SHA1_CONST(3);
1142 	e = ROTATE_LEFT(e, 30);
1143 
1144 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 63 */
1145 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_15 + SHA1_CONST(3);
1146 	d = ROTATE_LEFT(d, 30);
1147 
1148 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 64 */
1149 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_0 + SHA1_CONST(3);
1150 	c = ROTATE_LEFT(c, 30);
1151 
1152 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 65 */
1153 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_1 + SHA1_CONST(3);
1154 	b = ROTATE_LEFT(b, 30);
1155 
1156 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 66 */
1157 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_2 + SHA1_CONST(3);
1158 	a = ROTATE_LEFT(a, 30);
1159 
1160 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 67 */
1161 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_3 + SHA1_CONST(3);
1162 	e = ROTATE_LEFT(e, 30);
1163 
1164 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 68 */
1165 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_4 + SHA1_CONST(3);
1166 	d = ROTATE_LEFT(d, 30);
1167 
1168 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 69 */
1169 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_5 + SHA1_CONST(3);
1170 	c = ROTATE_LEFT(c, 30);
1171 
1172 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 70 */
1173 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_6 + SHA1_CONST(3);
1174 	b = ROTATE_LEFT(b, 30);
1175 
1176 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 71 */
1177 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_7 + SHA1_CONST(3);
1178 	a = ROTATE_LEFT(a, 30);
1179 
1180 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 72 */
1181 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_8 + SHA1_CONST(3);
1182 	e = ROTATE_LEFT(e, 30);
1183 
1184 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 73 */
1185 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_9 + SHA1_CONST(3);
1186 	d = ROTATE_LEFT(d, 30);
1187 
1188 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 74 */
1189 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_10 + SHA1_CONST(3);
1190 	c = ROTATE_LEFT(c, 30);
1191 
1192 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 75 */
1193 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_11 + SHA1_CONST(3);
1194 	b = ROTATE_LEFT(b, 30);
1195 
1196 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 76 */
1197 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_12 + SHA1_CONST(3);
1198 	a = ROTATE_LEFT(a, 30);
1199 
1200 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 77 */
1201 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_13 + SHA1_CONST(3);
1202 	e = ROTATE_LEFT(e, 30);
1203 
1204 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 78 */
1205 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_14 + SHA1_CONST(3);
1206 	d = ROTATE_LEFT(d, 30);
1207 
1208 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 79 */
1209 
1210 	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_15 +
1211 	    SHA1_CONST(3);
1212 	ctx->state[1] += b;
1213 	ctx->state[2] += ROTATE_LEFT(c, 30);
1214 	ctx->state[3] += d;
1215 	ctx->state[4] += e;
1216 
1217 	/* zeroize sensitive information */
1218 	w_0 = w_1 = w_2 = w_3 = w_4 = w_5 = w_6 = w_7 = w_8 = 0;
1219 	w_9 = w_10 = w_11 = w_12 = w_13 = w_14 = w_15 = 0;
1220 }
1221 
1222 /*
1223  * devpro compiler optimization:
1224  *
1225  * the compiler can generate better code if it knows that `input' and
1226  * `output' do not point to the same source.  there is no portable
1227  * way to tell the compiler this, but the sun compiler recognizes the
1228  * `_Restrict' keyword to indicate this condition.  use it if possible.
1229  */
1230 
1231 #ifdef	__RESTRICT
1232 #define	restrict	_Restrict
1233 #else
1234 #define	restrict	/* nothing */
1235 #endif
1236 
1237 /*
1238  * Encode()
1239  *
1240  * purpose: to convert a list of numbers from little endian to big endian
1241  *   input: uint8_t *	: place to store the converted big endian numbers
1242  *	    uint32_t *	: place to get numbers to convert from
1243  *          size_t	: the length of the input in bytes
1244  *  output: void
1245  */
1246 
static void
Encode(uint8_t *restrict output, uint32_t *restrict input, size_t len)
{
	size_t		word, byte;

#if	defined(__sparc)
	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
		/*
		 * Big-endian machine with a word-aligned destination:
		 * each 32-bit word can be stored directly.
		 */
		for (word = 0, byte = 0; byte < len; word++, byte += 4) {
			/* LINTED: pointer alignment */
			*((uint32_t *)(output + byte)) = input[word];
		}
	} else {
#endif	/* little endian -- will work on big endian, but slowly */
		/* Emit each word most-significant byte first. */
		for (word = 0, byte = 0; byte < len; word++, byte += 4) {
			uint32_t v = input[word];

			output[byte]	 = (uint8_t)(v >> 24);
			output[byte + 1] = (uint8_t)(v >> 16);
			output[byte + 2] = (uint8_t)(v >> 8);
			output[byte + 3] = (uint8_t)v;
		}
#if	defined(__sparc)
	}
#endif
}
1270 
1271 
1272 #ifdef _KERNEL
1273 
1274 /*
1275  * KCF software provider control entry points.
1276  */
1277 /* ARGSUSED */
static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/*
	 * This software provider has no hardware or external state that
	 * can fail, so it unconditionally reports itself as ready.
	 */
	*status = CRYPTO_PROVIDER_READY;
}
1283 
1284 /*
1285  * KCF software provider digest entry points.
1286  */
1287 
1288 static int
1289 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1290     crypto_req_handle_t req)
1291 {
1292 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1293 		return (CRYPTO_MECHANISM_INVALID);
1294 
1295 	/*
1296 	 * Allocate and initialize SHA1 context.
1297 	 */
1298 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
1299 	    crypto_kmflag(req));
1300 	if (ctx->cc_provider_private == NULL)
1301 		return (CRYPTO_HOST_MEMORY);
1302 
1303 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
1304 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1305 
1306 	return (CRYPTO_SUCCESS);
1307 }
1308 
1309 /*
1310  * Helper SHA1 digest update function for uio data.
1311  */
1312 static int
1313 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
1314 {
1315 	off_t offset = data->cd_offset;
1316 	size_t length = data->cd_length;
1317 	uint_t vec_idx;
1318 	size_t cur_len;
1319 
1320 	/* we support only kernel buffer */
1321 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
1322 		return (CRYPTO_ARGUMENTS_BAD);
1323 
1324 	/*
1325 	 * Jump to the first iovec containing data to be
1326 	 * digested.
1327 	 */
1328 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
1329 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
1330 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
1331 	if (vec_idx == data->cd_uio->uio_iovcnt) {
1332 		/*
1333 		 * The caller specified an offset that is larger than the
1334 		 * total size of the buffers it provided.
1335 		 */
1336 		return (CRYPTO_DATA_LEN_RANGE);
1337 	}
1338 
1339 	/*
1340 	 * Now do the digesting on the iovecs.
1341 	 */
1342 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
1343 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
1344 		    offset, length);
1345 
1346 		SHA1Update(sha1_ctx,
1347 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
1348 		    cur_len);
1349 
1350 		length -= cur_len;
1351 		vec_idx++;
1352 		offset = 0;
1353 	}
1354 
1355 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
1356 		/*
1357 		 * The end of the specified iovec's was reached but
1358 		 * the length requested could not be processed, i.e.
1359 		 * The caller requested to digest more data than it provided.
1360 		 */
1361 		return (CRYPTO_DATA_LEN_RANGE);
1362 	}
1363 
1364 	return (CRYPTO_SUCCESS);
1365 }
1366 
1367 /*
1368  * Helper SHA1 digest final function for uio data.
1369  * digest_len is the length of the desired digest. If digest_len
1370  * is smaller than the default SHA1 digest length, the caller
1371  * must pass a scratch buffer, digest_scratch, which must
1372  * be at least SHA1_DIGEST_LENGTH bytes.
1373  */
1374 static int
1375 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
1376     ulong_t digest_len, uchar_t *digest_scratch)
1377 {
1378 	off_t offset = digest->cd_offset;
1379 	uint_t vec_idx;
1380 
1381 	/* we support only kernel buffer */
1382 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
1383 		return (CRYPTO_ARGUMENTS_BAD);
1384 
1385 	/*
1386 	 * Jump to the first iovec containing ptr to the digest to
1387 	 * be returned.
1388 	 */
1389 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1390 	    vec_idx < digest->cd_uio->uio_iovcnt;
1391 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1392 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1393 		/*
1394 		 * The caller specified an offset that is
1395 		 * larger than the total size of the buffers
1396 		 * it provided.
1397 		 */
1398 		return (CRYPTO_DATA_LEN_RANGE);
1399 	}
1400 
1401 	if (offset + digest_len <=
1402 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1403 		/*
1404 		 * The computed SHA1 digest will fit in the current
1405 		 * iovec.
1406 		 */
1407 		if (digest_len != SHA1_DIGEST_LENGTH) {
1408 			/*
1409 			 * The caller requested a short digest. Digest
1410 			 * into a scratch buffer and return to
1411 			 * the user only what was requested.
1412 			 */
1413 			SHA1Final(digest_scratch, sha1_ctx);
1414 			bcopy(digest_scratch, (uchar_t *)digest->
1415 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1416 			    digest_len);
1417 		} else {
1418 			SHA1Final((uchar_t *)digest->
1419 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1420 			    sha1_ctx);
1421 		}
1422 	} else {
1423 		/*
1424 		 * The computed digest will be crossing one or more iovec's.
1425 		 * This is bad performance-wise but we need to support it.
1426 		 * Allocate a small scratch buffer on the stack and
1427 		 * copy it piece meal to the specified digest iovec's.
1428 		 */
1429 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
1430 		off_t scratch_offset = 0;
1431 		size_t length = digest_len;
1432 		size_t cur_len;
1433 
1434 		SHA1Final(digest_tmp, sha1_ctx);
1435 
1436 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1437 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1438 			    offset, length);
1439 			bcopy(digest_tmp + scratch_offset,
1440 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1441 			    cur_len);
1442 
1443 			length -= cur_len;
1444 			vec_idx++;
1445 			scratch_offset += cur_len;
1446 			offset = 0;
1447 		}
1448 
1449 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1450 			/*
1451 			 * The end of the specified iovec's was reached but
1452 			 * the length requested could not be processed, i.e.
1453 			 * The caller requested to digest more data than it
1454 			 * provided.
1455 			 */
1456 			return (CRYPTO_DATA_LEN_RANGE);
1457 		}
1458 	}
1459 
1460 	return (CRYPTO_SUCCESS);
1461 }
1462 
1463 /*
1464  * Helper SHA1 digest update for mblk's.
1465  */
1466 static int
1467 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
1468 {
1469 	off_t offset = data->cd_offset;
1470 	size_t length = data->cd_length;
1471 	mblk_t *mp;
1472 	size_t cur_len;
1473 
1474 	/*
1475 	 * Jump to the first mblk_t containing data to be digested.
1476 	 */
1477 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
1478 	    offset -= MBLKL(mp), mp = mp->b_cont);
1479 	if (mp == NULL) {
1480 		/*
1481 		 * The caller specified an offset that is larger than the
1482 		 * total size of the buffers it provided.
1483 		 */
1484 		return (CRYPTO_DATA_LEN_RANGE);
1485 	}
1486 
1487 	/*
1488 	 * Now do the digesting on the mblk chain.
1489 	 */
1490 	while (mp != NULL && length > 0) {
1491 		cur_len = MIN(MBLKL(mp) - offset, length);
1492 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
1493 		length -= cur_len;
1494 		offset = 0;
1495 		mp = mp->b_cont;
1496 	}
1497 
1498 	if (mp == NULL && length > 0) {
1499 		/*
1500 		 * The end of the mblk was reached but the length requested
1501 		 * could not be processed, i.e. The caller requested
1502 		 * to digest more data than it provided.
1503 		 */
1504 		return (CRYPTO_DATA_LEN_RANGE);
1505 	}
1506 
1507 	return (CRYPTO_SUCCESS);
1508 }
1509 
1510 /*
1511  * Helper SHA1 digest final for mblk's.
1512  * digest_len is the length of the desired digest. If digest_len
1513  * is smaller than the default SHA1 digest length, the caller
1514  * must pass a scratch buffer, digest_scratch, which must
1515  * be at least SHA1_DIGEST_LENGTH bytes.
1516  */
static int
sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	/*
	 * digest_scratch is only read on the digest_len !=
	 * SHA1_DIGEST_LENGTH path below; callers passing the full
	 * length may pass NULL (see sha1_digest_final()).
	 */
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * NOTE(review): this compares off_t + ulong_t against the
	 * (signed) MBLKL() value — presumably both sides are small and
	 * non-negative here; confirm against callers.
	 */
	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed SHA1 digest will fit in the current mblk.
		 * Do the SHA1Final() in-place.
		 */
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest_scratch, sha1_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			SHA1Final(mp->b_rptr + offset, sha1_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest iovec's.
		 */
		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		SHA1Final(digest_tmp, sha1_ctx);

		/* Spread the digest across as many mblks as needed. */
		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
1591 
1592 /* ARGSUSED */
1593 static int
1594 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
1595     crypto_req_handle_t req)
1596 {
1597 	int ret = CRYPTO_SUCCESS;
1598 
1599 	ASSERT(ctx->cc_provider_private != NULL);
1600 
1601 	/*
1602 	 * We need to just return the length needed to store the output.
1603 	 * We should not destroy the context for the following cases.
1604 	 */
1605 	if ((digest->cd_length == 0) ||
1606 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
1607 		digest->cd_length = SHA1_DIGEST_LENGTH;
1608 		return (CRYPTO_BUFFER_TOO_SMALL);
1609 	}
1610 
1611 	/*
1612 	 * Do the SHA1 update on the specified input data.
1613 	 */
1614 	switch (data->cd_format) {
1615 	case CRYPTO_DATA_RAW:
1616 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1617 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1618 		    data->cd_length);
1619 		break;
1620 	case CRYPTO_DATA_UIO:
1621 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1622 		    data);
1623 		break;
1624 	case CRYPTO_DATA_MBLK:
1625 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1626 		    data);
1627 		break;
1628 	default:
1629 		ret = CRYPTO_ARGUMENTS_BAD;
1630 	}
1631 
1632 	if (ret != CRYPTO_SUCCESS) {
1633 		/* the update failed, free context and bail */
1634 		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
1635 		ctx->cc_provider_private = NULL;
1636 		digest->cd_length = 0;
1637 		return (ret);
1638 	}
1639 
1640 	/*
1641 	 * Do a SHA1 final, must be done separately since the digest
1642 	 * type can be different than the input data type.
1643 	 */
1644 	switch (digest->cd_format) {
1645 	case CRYPTO_DATA_RAW:
1646 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1647 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1648 		break;
1649 	case CRYPTO_DATA_UIO:
1650 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1651 		    digest, SHA1_DIGEST_LENGTH, NULL);
1652 		break;
1653 	case CRYPTO_DATA_MBLK:
1654 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1655 		    digest, SHA1_DIGEST_LENGTH, NULL);
1656 		break;
1657 	default:
1658 		ret = CRYPTO_ARGUMENTS_BAD;
1659 	}
1660 
1661 	/* all done, free context and return */
1662 
1663 	if (ret == CRYPTO_SUCCESS) {
1664 		digest->cd_length = SHA1_DIGEST_LENGTH;
1665 	} else {
1666 		digest->cd_length = 0;
1667 	}
1668 
1669 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
1670 	ctx->cc_provider_private = NULL;
1671 	return (ret);
1672 }
1673 
1674 /* ARGSUSED */
1675 static int
1676 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1677     crypto_req_handle_t req)
1678 {
1679 	int ret = CRYPTO_SUCCESS;
1680 
1681 	ASSERT(ctx->cc_provider_private != NULL);
1682 
1683 	/*
1684 	 * Do the SHA1 update on the specified input data.
1685 	 */
1686 	switch (data->cd_format) {
1687 	case CRYPTO_DATA_RAW:
1688 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1689 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1690 		    data->cd_length);
1691 		break;
1692 	case CRYPTO_DATA_UIO:
1693 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1694 		    data);
1695 		break;
1696 	case CRYPTO_DATA_MBLK:
1697 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1698 		    data);
1699 		break;
1700 	default:
1701 		ret = CRYPTO_ARGUMENTS_BAD;
1702 	}
1703 
1704 	return (ret);
1705 }
1706 
1707 /* ARGSUSED */
1708 static int
1709 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
1710     crypto_req_handle_t req)
1711 {
1712 	int ret = CRYPTO_SUCCESS;
1713 
1714 	ASSERT(ctx->cc_provider_private != NULL);
1715 
1716 	/*
1717 	 * We need to just return the length needed to store the output.
1718 	 * We should not destroy the context for the following cases.
1719 	 */
1720 	if ((digest->cd_length == 0) ||
1721 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
1722 		digest->cd_length = SHA1_DIGEST_LENGTH;
1723 		return (CRYPTO_BUFFER_TOO_SMALL);
1724 	}
1725 
1726 	/*
1727 	 * Do a SHA1 final.
1728 	 */
1729 	switch (digest->cd_format) {
1730 	case CRYPTO_DATA_RAW:
1731 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1732 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1733 		break;
1734 	case CRYPTO_DATA_UIO:
1735 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1736 		    digest, SHA1_DIGEST_LENGTH, NULL);
1737 		break;
1738 	case CRYPTO_DATA_MBLK:
1739 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1740 		    digest, SHA1_DIGEST_LENGTH, NULL);
1741 		break;
1742 	default:
1743 		ret = CRYPTO_ARGUMENTS_BAD;
1744 	}
1745 
1746 	/* all done, free context and return */
1747 
1748 	if (ret == CRYPTO_SUCCESS) {
1749 		digest->cd_length = SHA1_DIGEST_LENGTH;
1750 	} else {
1751 		digest->cd_length = 0;
1752 	}
1753 
1754 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
1755 	ctx->cc_provider_private = NULL;
1756 
1757 	return (ret);
1758 }
1759 
1760 /* ARGSUSED */
1761 static int
1762 sha1_digest_atomic(crypto_provider_handle_t provider,
1763     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1764     crypto_data_t *data, crypto_data_t *digest,
1765     crypto_req_handle_t req)
1766 {
1767 	int ret = CRYPTO_SUCCESS;
1768 	SHA1_CTX sha1_ctx;
1769 
1770 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1771 		return (CRYPTO_MECHANISM_INVALID);
1772 
1773 	/*
1774 	 * Do the SHA1 init.
1775 	 */
1776 	SHA1Init(&sha1_ctx);
1777 
1778 	/*
1779 	 * Do the SHA1 update on the specified input data.
1780 	 */
1781 	switch (data->cd_format) {
1782 	case CRYPTO_DATA_RAW:
1783 		SHA1Update(&sha1_ctx,
1784 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1785 		    data->cd_length);
1786 		break;
1787 	case CRYPTO_DATA_UIO:
1788 		ret = sha1_digest_update_uio(&sha1_ctx, data);
1789 		break;
1790 	case CRYPTO_DATA_MBLK:
1791 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
1792 		break;
1793 	default:
1794 		ret = CRYPTO_ARGUMENTS_BAD;
1795 	}
1796 
1797 	if (ret != CRYPTO_SUCCESS) {
1798 		/* the update failed, bail */
1799 		digest->cd_length = 0;
1800 		return (ret);
1801 	}
1802 
1803 	/*
1804 	 * Do a SHA1 final, must be done separately since the digest
1805 	 * type can be different than the input data type.
1806 	 */
1807 	switch (digest->cd_format) {
1808 	case CRYPTO_DATA_RAW:
1809 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1810 		    digest->cd_offset, &sha1_ctx);
1811 		break;
1812 	case CRYPTO_DATA_UIO:
1813 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
1814 		    SHA1_DIGEST_LENGTH, NULL);
1815 		break;
1816 	case CRYPTO_DATA_MBLK:
1817 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
1818 		    SHA1_DIGEST_LENGTH, NULL);
1819 		break;
1820 	default:
1821 		ret = CRYPTO_ARGUMENTS_BAD;
1822 	}
1823 
1824 	if (ret == CRYPTO_SUCCESS) {
1825 		digest->cd_length = SHA1_DIGEST_LENGTH;
1826 	} else {
1827 		digest->cd_length = 0;
1828 	}
1829 
1830 	return (ret);
1831 }
1832 
1833 /*
1834  * KCF software provider mac entry points.
1835  *
1836  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
1837  *
1838  * Init:
1839  * The initialization routine initializes what we denote
1840  * as the inner and outer contexts by doing
1841  * - for inner context: SHA1(key XOR ipad)
1842  * - for outer context: SHA1(key XOR opad)
1843  *
1844  * Update:
1845  * Each subsequent SHA1 HMAC update will result in an
1846  * update of the inner context with the specified data.
1847  *
1848  * Final:
1849  * The SHA1 HMAC final will do a SHA1 final operation on the
1850  * inner context, and the resulting digest will be used
1851  * as the data for an update on the outer context. Last
1852  * but not least, a SHA1 final on the outer context will
1853  * be performed to obtain the SHA1 HMAC digest to return
1854  * to the user.
1855  */
1856 
1857 /*
1858  * Initialize a SHA1-HMAC context.
1859  */
1860 static void
1861 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1862 {
1863 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
1864 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
1865 	uint_t i;
1866 
1867 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
1868 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
1869 
1870 	bcopy(keyval, ipad, length_in_bytes);
1871 	bcopy(keyval, opad, length_in_bytes);
1872 
1873 	/* XOR key with ipad (0x36) and opad (0x5c) */
1874 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
1875 		ipad[i] ^= 0x36363636;
1876 		opad[i] ^= 0x5c5c5c5c;
1877 	}
1878 
1879 	/* perform SHA1 on ipad */
1880 	SHA1Init(&ctx->hc_icontext);
1881 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
1882 
1883 	/* perform SHA1 on opad */
1884 	SHA1Init(&ctx->hc_ocontext);
1885 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
1886 }
1887 
1888 /*
1889  */
1890 static int
1891 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1892     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1893     crypto_req_handle_t req)
1894 {
1895 	int ret = CRYPTO_SUCCESS;
1896 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1897 
1898 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1899 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1900 		return (CRYPTO_MECHANISM_INVALID);
1901 
1902 	/* Add support for key by attributes (RFE 4706552) */
1903 	if (key->ck_format != CRYPTO_KEY_RAW)
1904 		return (CRYPTO_ARGUMENTS_BAD);
1905 
1906 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1907 	    crypto_kmflag(req));
1908 	if (ctx->cc_provider_private == NULL)
1909 		return (CRYPTO_HOST_MEMORY);
1910 
1911 	if (ctx_template != NULL) {
1912 		/* reuse context template */
1913 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
1914 		    sizeof (sha1_hmac_ctx_t));
1915 	} else {
1916 		/* no context template, compute context */
1917 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1918 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
1919 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1920 
1921 			/*
1922 			 * Hash the passed-in key to get a smaller key.
1923 			 * The inner context is used since it hasn't been
1924 			 * initialized yet.
1925 			 */
1926 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
1927 			    key->ck_data, keylen_in_bytes, digested_key);
1928 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1929 			    digested_key, SHA1_DIGEST_LENGTH);
1930 		} else {
1931 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1932 			    key->ck_data, keylen_in_bytes);
1933 		}
1934 	}
1935 
1936 	/*
1937 	 * Get the mechanism parameters, if applicable.
1938 	 */
1939 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1940 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1941 		if (mechanism->cm_param == NULL ||
1942 		    mechanism->cm_param_len != sizeof (ulong_t))
1943 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1944 		PROV_SHA1_GET_DIGEST_LEN(mechanism,
1945 		    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
1946 		if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
1947 		    SHA1_DIGEST_LENGTH)
1948 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1949 	}
1950 
1951 	if (ret != CRYPTO_SUCCESS) {
1952 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1953 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1954 		ctx->cc_provider_private = NULL;
1955 	}
1956 
1957 	return (ret);
1958 }
1959 
1960 /* ARGSUSED */
1961 static int
1962 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1963 {
1964 	int ret = CRYPTO_SUCCESS;
1965 
1966 	ASSERT(ctx->cc_provider_private != NULL);
1967 
1968 	/*
1969 	 * Do a SHA1 update of the inner context using the specified
1970 	 * data.
1971 	 */
1972 	switch (data->cd_format) {
1973 	case CRYPTO_DATA_RAW:
1974 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
1975 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1976 		    data->cd_length);
1977 		break;
1978 	case CRYPTO_DATA_UIO:
1979 		ret = sha1_digest_update_uio(
1980 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1981 		break;
1982 	case CRYPTO_DATA_MBLK:
1983 		ret = sha1_digest_update_mblk(
1984 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1985 		break;
1986 	default:
1987 		ret = CRYPTO_ARGUMENTS_BAD;
1988 	}
1989 
1990 	return (ret);
1991 }
1992 
/*
 * Terminate a SHA1-HMAC multi-part operation: finalize the inner
 * context, feed its digest to the outer context, finalize that, and
 * store the (possibly truncated, for the general-length mechanism)
 * result in the caller's buffer.
 *
 * On CRYPTO_BUFFER_TOO_SMALL the context is deliberately preserved so
 * the caller can retry with a larger buffer; on every other exit path
 * the private context is zeroized and freed.
 */
/* ARGSUSED */
static int
sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	uint32_t digest_len = SHA1_DIGEST_LENGTH;

	ASSERT(ctx->cc_provider_private != NULL);

	/* general-length mechanism: use the digest length set at init */
	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
		mac->cd_length = digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);

	/*
	 * Do a SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
	    SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computing
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest,
			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			/* full-length digest: finalize straight into place */
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset,
			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(
		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
		    digest_len, digest);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(
		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* report the number of bytes produced, or 0 on failure */
	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}

	/* the operation is over: scrub and release the private context */
	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
2076 
/*
 * Helper shared by the atomic mac entry points: do a SHA1 update of the
 * inner context of the sha1_hmac_ctx_t (ctx) with the contents of the
 * crypto_data_t (data), dispatching on the data format (raw buffer,
 * uio, or mblk chain).  Sets (ret) to CRYPTO_ARGUMENTS_BAD for an
 * unknown format, or to the helper's return value for uio/mblk.
 * NOTE: (data) is evaluated more than once; pass only a plain pointer.
 */
#define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA1Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
		break;							\
	case CRYPTO_DATA_MBLK:						\
		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
		    data);						\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
}
2095 
/*
 * Single-part SHA1-HMAC: compute the MAC of `data` under `key` in one
 * call, using a stack-allocated context instead of a kmem allocation.
 * The context is initialized from `ctx_template` when provided,
 * otherwise derived from the raw key (long keys are first hashed down
 * to a SHA1 digest).  For the general-length mechanism the requested
 * digest length is validated against SHA1_DIGEST_LENGTH.
 *
 * On success the MAC is written to `mac` and mac->cd_length is set to
 * the digest length; on failure mac->cd_length is set to 0.  The stack
 * context is zeroized on every exit path so no key schedule material is
 * left behind.
 */
/* ARGSUSED */
static int
sha1_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	sha1_hmac_ctx_t sha1_hmac_ctx;
	uint32_t digest_len = SHA1_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	} else {
		/* no context template, initialize context */
		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
			    SHA1_DIGEST_LENGTH);
		} else {
			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* get the mechanism parameters, if applicable */
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > SHA1_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA1 update of the inner context using the specified data */
	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroize the context on the stack */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));

	return (ret);
bail:
	/* error path: scrub the stack context and report no output */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
2215 
2216 /* ARGSUSED */
2217 static int
2218 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
2219     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
2220     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
2221     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
2222 {
2223 	int ret = CRYPTO_SUCCESS;
2224 	uchar_t digest[SHA1_DIGEST_LENGTH];
2225 	sha1_hmac_ctx_t sha1_hmac_ctx;
2226 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
2227 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2228 
2229 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
2230 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
2231 		return (CRYPTO_MECHANISM_INVALID);
2232 
2233 	/* Add support for key by attributes (RFE 4706552) */
2234 	if (key->ck_format != CRYPTO_KEY_RAW)
2235 		return (CRYPTO_ARGUMENTS_BAD);
2236 
2237 	if (ctx_template != NULL) {
2238 		/* reuse context template */
2239 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2240 	} else {
2241 		/* no context template, initialize context */
2242 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2243 			/*
2244 			 * Hash the passed-in key to get a smaller key.
2245 			 * The inner context is used since it hasn't been
2246 			 * initialized yet.
2247 			 */
2248 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
2249 			    key->ck_data, keylen_in_bytes, digest);
2250 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
2251 			    SHA1_DIGEST_LENGTH);
2252 		} else {
2253 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
2254 			    keylen_in_bytes);
2255 		}
2256 	}
2257 
2258 	/* get the mechanism parameters, if applicable */
2259 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
2260 		if (mechanism->cm_param == NULL ||
2261 		    mechanism->cm_param_len != sizeof (ulong_t)) {
2262 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2263 			goto bail;
2264 		}
2265 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
2266 		if (digest_len > SHA1_DIGEST_LENGTH) {
2267 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2268 			goto bail;
2269 		}
2270 	}
2271 
2272 	if (mac->cd_length != digest_len) {
2273 		ret = CRYPTO_INVALID_MAC;
2274 		goto bail;
2275 	}
2276 
2277 	/* do a SHA1 update of the inner context using the specified data */
2278 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
2279 	if (ret != CRYPTO_SUCCESS)
2280 		/* the update failed, free context and bail */
2281 		goto bail;
2282 
2283 	/* do a SHA1 final on the inner context */
2284 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
2285 
2286 	/*
2287 	 * Do an SHA1 update on the outer context, feeding the inner
2288 	 * digest as data.
2289 	 */
2290 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
2291 
2292 	/*
2293 	 * Do a SHA1 final on the outer context, storing the computed
2294 	 * digest in the users buffer.
2295 	 */
2296 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
2297 
2298 	/*
2299 	 * Compare the computed digest against the expected digest passed
2300 	 * as argument.
2301 	 */
2302 
2303 	switch (mac->cd_format) {
2304 
2305 	case CRYPTO_DATA_RAW:
2306 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
2307 		    mac->cd_offset, digest_len) != 0)
2308 			ret = CRYPTO_INVALID_MAC;
2309 		break;
2310 
2311 	case CRYPTO_DATA_UIO: {
2312 		off_t offset = mac->cd_offset;
2313 		uint_t vec_idx;
2314 		off_t scratch_offset = 0;
2315 		size_t length = digest_len;
2316 		size_t cur_len;
2317 
2318 		/* we support only kernel buffer */
2319 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
2320 			return (CRYPTO_ARGUMENTS_BAD);
2321 
2322 		/* jump to the first iovec containing the expected digest */
2323 		for (vec_idx = 0;
2324 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
2325 		    vec_idx < mac->cd_uio->uio_iovcnt;
2326 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
2327 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
2328 			/*
2329 			 * The caller specified an offset that is
2330 			 * larger than the total size of the buffers
2331 			 * it provided.
2332 			 */
2333 			ret = CRYPTO_DATA_LEN_RANGE;
2334 			break;
2335 		}
2336 
2337 		/* do the comparison of computed digest vs specified one */
2338 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
2339 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
2340 			    offset, length);
2341 
2342 			if (bcmp(digest + scratch_offset,
2343 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
2344 			    cur_len) != 0) {
2345 				ret = CRYPTO_INVALID_MAC;
2346 				break;
2347 			}
2348 
2349 			length -= cur_len;
2350 			vec_idx++;
2351 			scratch_offset += cur_len;
2352 			offset = 0;
2353 		}
2354 		break;
2355 	}
2356 
2357 	case CRYPTO_DATA_MBLK: {
2358 		off_t offset = mac->cd_offset;
2359 		mblk_t *mp;
2360 		off_t scratch_offset = 0;
2361 		size_t length = digest_len;
2362 		size_t cur_len;
2363 
2364 		/* jump to the first mblk_t containing the expected digest */
2365 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
2366 		    offset -= MBLKL(mp), mp = mp->b_cont);
2367 		if (mp == NULL) {
2368 			/*
2369 			 * The caller specified an offset that is larger than
2370 			 * the total size of the buffers it provided.
2371 			 */
2372 			ret = CRYPTO_DATA_LEN_RANGE;
2373 			break;
2374 		}
2375 
2376 		while (mp != NULL && length > 0) {
2377 			cur_len = MIN(MBLKL(mp) - offset, length);
2378 			if (bcmp(digest + scratch_offset,
2379 			    mp->b_rptr + offset, cur_len) != 0) {
2380 				ret = CRYPTO_INVALID_MAC;
2381 				break;
2382 			}
2383 
2384 			length -= cur_len;
2385 			mp = mp->b_cont;
2386 			scratch_offset += cur_len;
2387 			offset = 0;
2388 		}
2389 		break;
2390 	}
2391 
2392 	default:
2393 		ret = CRYPTO_ARGUMENTS_BAD;
2394 	}
2395 
2396 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2397 	return (ret);
2398 bail:
2399 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2400 	mac->cd_length = 0;
2401 	return (ret);
2402 }
2403 
2404 /*
2405  * KCF software provider context management entry points.
2406  */
2407 
2408 /* ARGSUSED */
2409 static int
2410 sha1_create_ctx_template(crypto_provider_handle_t provider,
2411     crypto_mechanism_t *mechanism, crypto_key_t *key,
2412     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2413     crypto_req_handle_t req)
2414 {
2415 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
2416 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2417 
2418 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
2419 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
2420 		return (CRYPTO_MECHANISM_INVALID);
2421 	}
2422 
2423 	/* Add support for key by attributes (RFE 4706552) */
2424 	if (key->ck_format != CRYPTO_KEY_RAW)
2425 		return (CRYPTO_ARGUMENTS_BAD);
2426 
2427 	/*
2428 	 * Allocate and initialize SHA1 context.
2429 	 */
2430 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
2431 	    crypto_kmflag(req));
2432 	if (sha1_hmac_ctx_tmpl == NULL)
2433 		return (CRYPTO_HOST_MEMORY);
2434 
2435 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2436 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
2437 
2438 		/*
2439 		 * Hash the passed-in key to get a smaller key.
2440 		 * The inner context is used since it hasn't been
2441 		 * initialized yet.
2442 		 */
2443 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
2444 		    key->ck_data, keylen_in_bytes, digested_key);
2445 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
2446 		    SHA1_DIGEST_LENGTH);
2447 	} else {
2448 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
2449 		    keylen_in_bytes);
2450 	}
2451 
2452 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2453 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
2454 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
2455 
2456 
2457 	return (CRYPTO_SUCCESS);
2458 }
2459 
2460 static int
2461 sha1_free_context(crypto_ctx_t *ctx)
2462 {
2463 	uint_t ctx_len;
2464 	sha1_mech_type_t mech_type;
2465 
2466 	if (ctx->cc_provider_private == NULL)
2467 		return (CRYPTO_SUCCESS);
2468 
2469 	/*
2470 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
2471 	 * have different lengths.
2472 	 */
2473 
2474 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
2475 	if (mech_type == SHA1_MECH_INFO_TYPE)
2476 		ctx_len = sizeof (sha1_ctx_t);
2477 	else {
2478 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
2479 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
2480 		ctx_len = sizeof (sha1_hmac_ctx_t);
2481 	}
2482 
2483 	bzero(ctx->cc_provider_private, ctx_len);
2484 	kmem_free(ctx->cc_provider_private, ctx_len);
2485 	ctx->cc_provider_private = NULL;
2486 
2487 	return (CRYPTO_SUCCESS);
2488 }
2489 
2490 #endif /* _KERNEL */
2491