1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/modctl.h>
28 #include <sys/cmn_err.h>
29 #include <sys/note.h>
30 #include <sys/crypto/common.h>
31 #include <sys/crypto/spi.h>
32 #include <sys/strsun.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35 
36 #include <sys/sha1.h>
37 #include <sha1/sha1_impl.h>
38 
39 /*
40  * The sha1 module is created with two modlinkages:
41  * - a modlmisc that allows consumers to directly call the entry points
42  *   SHA1Init, SHA1Update, and SHA1Final.
43  * - a modlcrypto that allows the module to register with the Kernel
44  *   Cryptographic Framework (KCF) as a software provider for the SHA1
45  *   mechanisms.
46  */
47 
48 static struct modlmisc modlmisc = {
49 	&mod_miscops,
50 	"SHA1 Message-Digest Algorithm"
51 };
52 
53 static struct modlcrypto modlcrypto = {
54 	&mod_cryptoops,
55 	"SHA1 Kernel SW Provider 1.1"
56 };
57 
58 static struct modlinkage modlinkage = {
59 	MODREV_1, &modlmisc, &modlcrypto, NULL
60 };
61 
62 
63 /*
64  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
65  * by KCF to one of the entry points.
66  */
67 
68 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
69 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
70 
/*
 * Macro to extract the digest length passed as a mechanism parameter.
 * The parameter may not be properly aligned, hence the bcopy() fallback.
 */
#define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param);	\
	else {								\
		ulong_t tmp_ulong;					\
		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
}
81 
82 #define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
83 	SHA1Init(ctx);					\
84 	SHA1Update(ctx, key, len);			\
85 	SHA1Final(digest, ctx);				\
86 }
87 
88 /*
89  * Mechanism info structure passed to KCF during registration.
90  */
91 static crypto_mech_info_t sha1_mech_info_tab[] = {
92 	/* SHA1 */
93 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
94 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
95 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
96 	/* SHA1-HMAC */
97 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
98 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
99 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
100 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES},
101 	/* SHA1-HMAC GENERAL */
102 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
103 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
104 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
105 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES}
106 };
107 
108 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
109 
110 static crypto_control_ops_t sha1_control_ops = {
111 	sha1_provider_status
112 };
113 
114 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
115     crypto_req_handle_t);
116 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
117     crypto_req_handle_t);
118 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
119     crypto_req_handle_t);
120 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
121     crypto_req_handle_t);
122 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
123     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
124     crypto_req_handle_t);
125 
126 static crypto_digest_ops_t sha1_digest_ops = {
127 	sha1_digest_init,
128 	sha1_digest,
129 	sha1_digest_update,
130 	NULL,
131 	sha1_digest_final,
132 	sha1_digest_atomic
133 };
134 
135 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
136     crypto_spi_ctx_template_t, crypto_req_handle_t);
137 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
138     crypto_req_handle_t);
139 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
140 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
141     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
142     crypto_spi_ctx_template_t, crypto_req_handle_t);
143 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
144     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
145     crypto_spi_ctx_template_t, crypto_req_handle_t);
146 
147 static crypto_mac_ops_t sha1_mac_ops = {
148 	sha1_mac_init,
149 	NULL,
150 	sha1_mac_update,
151 	sha1_mac_final,
152 	sha1_mac_atomic,
153 	sha1_mac_verify_atomic
154 };
155 
156 static int sha1_create_ctx_template(crypto_provider_handle_t,
157     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
158     size_t *, crypto_req_handle_t);
159 static int sha1_free_context(crypto_ctx_t *);
160 
161 static crypto_ctx_ops_t sha1_ctx_ops = {
162 	sha1_create_ctx_template,
163 	sha1_free_context
164 };
165 
166 static void sha1_POST(int *);
167 
168 static crypto_fips140_ops_t sha1_fips140_ops = {
169 	sha1_POST
170 };
171 
172 static crypto_ops_t sha1_crypto_ops = {
173 	&sha1_control_ops,
174 	&sha1_digest_ops,
175 	NULL,
176 	&sha1_mac_ops,
177 	NULL,
178 	NULL,
179 	NULL,
180 	NULL,
181 	NULL,
182 	NULL,
183 	NULL,
184 	NULL,
185 	NULL,
186 	&sha1_ctx_ops,
187 	NULL,
188 	NULL,
189 	&sha1_fips140_ops
190 };
191 
192 static crypto_provider_info_t sha1_prov_info = {
193 	CRYPTO_SPI_VERSION_4,
194 	"SHA1 Software Provider",
195 	CRYPTO_SW_PROVIDER,
196 	{&modlinkage},
197 	NULL,
198 	&sha1_crypto_ops,
199 	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
200 	sha1_mech_info_tab
201 };
202 
203 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
204 
205 int
206 _init()
207 {
208 	int ret;
209 
210 	if ((ret = mod_install(&modlinkage)) != 0)
211 		return (ret);
212 
213 	/*
214 	 * Register with KCF. If the registration fails, log an
215 	 * error but do not uninstall the module, since the functionality
216 	 * provided by misc/sha1 should still be available.
217 	 */
218 	if ((ret = crypto_register_provider(&sha1_prov_info,
219 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
220 		cmn_err(CE_WARN, "sha1 _init: "
221 		    "crypto_register_provider() failed (0x%x)", ret);
222 
223 	return (0);
224 }
225 
226 int
227 _info(struct modinfo *modinfop)
228 {
229 	return (mod_info(&modlinkage, modinfop));
230 }
231 
232 /*
233  * KCF software provider control entry points.
234  */
235 /* ARGSUSED */
236 static void
237 sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
238 {
239 	*status = CRYPTO_PROVIDER_READY;
240 }
241 
242 /*
243  * KCF software provider digest entry points.
244  */
245 
246 static int
247 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
248     crypto_req_handle_t req)
249 {
250 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
251 		return (CRYPTO_MECHANISM_INVALID);
252 
253 	/*
254 	 * Allocate and initialize SHA1 context.
255 	 */
256 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
257 	    crypto_kmflag(req));
258 	if (ctx->cc_provider_private == NULL)
259 		return (CRYPTO_HOST_MEMORY);
260 
261 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
262 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
263 
264 	return (CRYPTO_SUCCESS);
265 }
266 
267 /*
268  * Helper SHA1 digest update function for uio data.
269  */
270 static int
271 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
272 {
273 	off_t offset = data->cd_offset;
274 	size_t length = data->cd_length;
275 	uint_t vec_idx;
276 	size_t cur_len;
277 
278 	/* we support only kernel buffer */
279 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
280 		return (CRYPTO_ARGUMENTS_BAD);
281 
282 	/*
283 	 * Jump to the first iovec containing data to be
284 	 * digested.
285 	 */
286 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
287 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
288 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
289 		;
290 	if (vec_idx == data->cd_uio->uio_iovcnt) {
291 		/*
292 		 * The caller specified an offset that is larger than the
293 		 * total size of the buffers it provided.
294 		 */
295 		return (CRYPTO_DATA_LEN_RANGE);
296 	}
297 
298 	/*
299 	 * Now do the digesting on the iovecs.
300 	 */
301 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
302 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
303 		    offset, length);
304 
305 		SHA1Update(sha1_ctx,
306 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
307 		    cur_len);
308 
309 		length -= cur_len;
310 		vec_idx++;
311 		offset = 0;
312 	}
313 
314 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
315 		/*
		 * The end of the specified iovecs was reached, but the
		 * requested length could not be processed; i.e., the
		 * caller requested to digest more data than it provided.
319 		 */
320 		return (CRYPTO_DATA_LEN_RANGE);
321 	}
322 
323 	return (CRYPTO_SUCCESS);
324 }
325 
326 /*
327  * Helper SHA1 digest final function for uio data.
328  * digest_len is the length of the desired digest. If digest_len
329  * is smaller than the default SHA1 digest length, the caller
330  * must pass a scratch buffer, digest_scratch, which must
331  * be at least SHA1_DIGEST_LENGTH bytes.
332  */
333 static int
334 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
335     ulong_t digest_len, uchar_t *digest_scratch)
336 {
337 	off_t offset = digest->cd_offset;
338 	uint_t vec_idx;
339 
340 	/* we support only kernel buffer */
341 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
342 		return (CRYPTO_ARGUMENTS_BAD);
343 
344 	/*
	 * Jump to the first iovec that will be used to store the
	 * digest.
347 	 */
	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
		;
352 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
353 		/*
354 		 * The caller specified an offset that is
355 		 * larger than the total size of the buffers
356 		 * it provided.
357 		 */
358 		return (CRYPTO_DATA_LEN_RANGE);
359 	}
360 
361 	if (offset + digest_len <=
362 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
363 		/*
364 		 * The computed SHA1 digest will fit in the current
365 		 * iovec.
366 		 */
367 		if (digest_len != SHA1_DIGEST_LENGTH) {
368 			/*
369 			 * The caller requested a short digest. Digest
370 			 * into a scratch buffer and return to
371 			 * the user only what was requested.
372 			 */
373 			SHA1Final(digest_scratch, sha1_ctx);
374 			bcopy(digest_scratch, (uchar_t *)digest->
375 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
376 			    digest_len);
377 		} else {
378 			SHA1Final((uchar_t *)digest->
379 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
380 			    sha1_ctx);
381 		}
382 	} else {
383 		/*
		 * The computed digest will cross one or more iovecs.
		 * This is bad performance-wise but we need to support it.
		 * Digest into a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest iovecs.
388 		 */
389 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
390 		off_t scratch_offset = 0;
391 		size_t length = digest_len;
392 		size_t cur_len;
393 
394 		SHA1Final(digest_tmp, sha1_ctx);
395 
396 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
397 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
398 			    offset, length);
399 			bcopy(digest_tmp + scratch_offset,
400 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
401 			    cur_len);
402 
403 			length -= cur_len;
404 			vec_idx++;
405 			scratch_offset += cur_len;
406 			offset = 0;
407 		}
408 
409 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
410 			/*
			 * The end of the specified iovecs was reached, but
			 * the requested length could not be processed;
			 * i.e., the caller requested to digest more data
			 * than it provided.
415 			 */
416 			return (CRYPTO_DATA_LEN_RANGE);
417 		}
418 	}
419 
420 	return (CRYPTO_SUCCESS);
421 }
422 
423 /*
424  * Helper SHA1 digest update for mblk's.
425  */
426 static int
427 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
428 {
429 	off_t offset = data->cd_offset;
430 	size_t length = data->cd_length;
431 	mblk_t *mp;
432 	size_t cur_len;
433 
434 	/*
435 	 * Jump to the first mblk_t containing data to be digested.
436 	 */
437 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
438 	    offset -= MBLKL(mp), mp = mp->b_cont)
439 		;
440 	if (mp == NULL) {
441 		/*
442 		 * The caller specified an offset that is larger than the
443 		 * total size of the buffers it provided.
444 		 */
445 		return (CRYPTO_DATA_LEN_RANGE);
446 	}
447 
448 	/*
449 	 * Now do the digesting on the mblk chain.
450 	 */
451 	while (mp != NULL && length > 0) {
452 		cur_len = MIN(MBLKL(mp) - offset, length);
453 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
454 		length -= cur_len;
455 		offset = 0;
456 		mp = mp->b_cont;
457 	}
458 
459 	if (mp == NULL && length > 0) {
460 		/*
		 * The end of the mblk chain was reached, but the requested
		 * length could not be processed; i.e., the caller requested
		 * to digest more data than it provided.
464 		 */
465 		return (CRYPTO_DATA_LEN_RANGE);
466 	}
467 
468 	return (CRYPTO_SUCCESS);
469 }
470 
471 /*
472  * Helper SHA1 digest final for mblk's.
473  * digest_len is the length of the desired digest. If digest_len
474  * is smaller than the default SHA1 digest length, the caller
475  * must pass a scratch buffer, digest_scratch, which must
476  * be at least SHA1_DIGEST_LENGTH bytes.
477  */
478 static int
479 sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
480     ulong_t digest_len, uchar_t *digest_scratch)
481 {
482 	off_t offset = digest->cd_offset;
483 	mblk_t *mp;
484 
485 	/*
486 	 * Jump to the first mblk_t that will be used to store the digest.
487 	 */
488 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
489 	    offset -= MBLKL(mp), mp = mp->b_cont)
490 		;
491 	if (mp == NULL) {
492 		/*
493 		 * The caller specified an offset that is larger than the
494 		 * total size of the buffers it provided.
495 		 */
496 		return (CRYPTO_DATA_LEN_RANGE);
497 	}
498 
499 	if (offset + digest_len <= MBLKL(mp)) {
500 		/*
501 		 * The computed SHA1 digest will fit in the current mblk.
502 		 * Do the SHA1Final() in-place.
503 		 */
504 		if (digest_len != SHA1_DIGEST_LENGTH) {
505 			/*
506 			 * The caller requested a short digest. Digest
507 			 * into a scratch buffer and return to
508 			 * the user only what was requested.
509 			 */
510 			SHA1Final(digest_scratch, sha1_ctx);
511 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
512 		} else {
513 			SHA1Final(mp->b_rptr + offset, sha1_ctx);
514 		}
515 	} else {
516 		/*
		 * The computed digest will cross one or more mblks.
		 * This is bad performance-wise but we need to support it.
		 * Digest into a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest mblks.
521 		 */
522 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
523 		off_t scratch_offset = 0;
524 		size_t length = digest_len;
525 		size_t cur_len;
526 
527 		SHA1Final(digest_tmp, sha1_ctx);
528 
529 		while (mp != NULL && length > 0) {
530 			cur_len = MIN(MBLKL(mp) - offset, length);
531 			bcopy(digest_tmp + scratch_offset,
532 			    mp->b_rptr + offset, cur_len);
533 
534 			length -= cur_len;
535 			mp = mp->b_cont;
536 			scratch_offset += cur_len;
537 			offset = 0;
538 		}
539 
540 		if (mp == NULL && length > 0) {
541 			/*
			 * The end of the specified mblk chain was reached,
			 * but the requested length could not be processed;
			 * i.e., the caller requested to digest more data
			 * than it provided.
546 			 */
547 			return (CRYPTO_DATA_LEN_RANGE);
548 		}
549 	}
550 
551 	return (CRYPTO_SUCCESS);
552 }
553 
554 /* ARGSUSED */
555 static int
556 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
557     crypto_req_handle_t req)
558 {
559 	int ret = CRYPTO_SUCCESS;
560 
561 	ASSERT(ctx->cc_provider_private != NULL);
562 
563 	/*
	 * If the output buffer is too small, just return the length
	 * needed to store the output and do not destroy the context.
566 	 */
567 	if ((digest->cd_length == 0) ||
568 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
569 		digest->cd_length = SHA1_DIGEST_LENGTH;
570 		return (CRYPTO_BUFFER_TOO_SMALL);
571 	}
572 
573 	/*
574 	 * Do the SHA1 update on the specified input data.
575 	 */
576 	switch (data->cd_format) {
577 	case CRYPTO_DATA_RAW:
578 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
579 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
580 		    data->cd_length);
581 		break;
582 	case CRYPTO_DATA_UIO:
583 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
584 		    data);
585 		break;
586 	case CRYPTO_DATA_MBLK:
587 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
588 		    data);
589 		break;
590 	default:
591 		ret = CRYPTO_ARGUMENTS_BAD;
592 	}
593 
594 	if (ret != CRYPTO_SUCCESS) {
595 		/* the update failed, free context and bail */
596 		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
597 		ctx->cc_provider_private = NULL;
598 		digest->cd_length = 0;
599 		return (ret);
600 	}
601 
602 	/*
	 * Do a SHA1 final. This must be done separately since the digest
	 * data type can be different from the input data type.
605 	 */
606 	switch (digest->cd_format) {
607 	case CRYPTO_DATA_RAW:
608 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
609 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
610 		break;
611 	case CRYPTO_DATA_UIO:
612 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
613 		    digest, SHA1_DIGEST_LENGTH, NULL);
614 		break;
615 	case CRYPTO_DATA_MBLK:
616 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
617 		    digest, SHA1_DIGEST_LENGTH, NULL);
618 		break;
619 	default:
620 		ret = CRYPTO_ARGUMENTS_BAD;
621 	}
622 
623 	/* all done, free context and return */
624 
625 	if (ret == CRYPTO_SUCCESS) {
626 		digest->cd_length = SHA1_DIGEST_LENGTH;
627 	} else {
628 		digest->cd_length = 0;
629 	}
630 
631 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
632 	ctx->cc_provider_private = NULL;
633 	return (ret);
634 }
635 
636 /* ARGSUSED */
637 static int
638 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
639     crypto_req_handle_t req)
640 {
641 	int ret = CRYPTO_SUCCESS;
642 
643 	ASSERT(ctx->cc_provider_private != NULL);
644 
645 	/*
646 	 * Do the SHA1 update on the specified input data.
647 	 */
648 	switch (data->cd_format) {
649 	case CRYPTO_DATA_RAW:
650 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
651 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
652 		    data->cd_length);
653 		break;
654 	case CRYPTO_DATA_UIO:
655 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
656 		    data);
657 		break;
658 	case CRYPTO_DATA_MBLK:
659 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
660 		    data);
661 		break;
662 	default:
663 		ret = CRYPTO_ARGUMENTS_BAD;
664 	}
665 
666 	return (ret);
667 }
668 
669 /* ARGSUSED */
670 static int
671 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
672     crypto_req_handle_t req)
673 {
674 	int ret = CRYPTO_SUCCESS;
675 
676 	ASSERT(ctx->cc_provider_private != NULL);
677 
678 	/*
	 * If the output buffer is too small, just return the length
	 * needed to store the output and do not destroy the context.
681 	 */
682 	if ((digest->cd_length == 0) ||
683 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
684 		digest->cd_length = SHA1_DIGEST_LENGTH;
685 		return (CRYPTO_BUFFER_TOO_SMALL);
686 	}
687 
688 	/*
689 	 * Do a SHA1 final.
690 	 */
691 	switch (digest->cd_format) {
692 	case CRYPTO_DATA_RAW:
693 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
694 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
695 		break;
696 	case CRYPTO_DATA_UIO:
697 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
698 		    digest, SHA1_DIGEST_LENGTH, NULL);
699 		break;
700 	case CRYPTO_DATA_MBLK:
701 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
702 		    digest, SHA1_DIGEST_LENGTH, NULL);
703 		break;
704 	default:
705 		ret = CRYPTO_ARGUMENTS_BAD;
706 	}
707 
708 	/* all done, free context and return */
709 
710 	if (ret == CRYPTO_SUCCESS) {
711 		digest->cd_length = SHA1_DIGEST_LENGTH;
712 	} else {
713 		digest->cd_length = 0;
714 	}
715 
716 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
717 	ctx->cc_provider_private = NULL;
718 
719 	return (ret);
720 }
721 
722 /* ARGSUSED */
723 static int
724 sha1_digest_atomic(crypto_provider_handle_t provider,
725     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
726     crypto_data_t *data, crypto_data_t *digest,
727     crypto_req_handle_t req)
728 {
729 	int ret = CRYPTO_SUCCESS;
730 	SHA1_CTX sha1_ctx;
731 
732 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
733 		return (CRYPTO_MECHANISM_INVALID);
734 
735 	/*
736 	 * Do the SHA1 init.
737 	 */
738 	SHA1Init(&sha1_ctx);
739 
740 	/*
741 	 * Do the SHA1 update on the specified input data.
742 	 */
743 	switch (data->cd_format) {
744 	case CRYPTO_DATA_RAW:
745 		SHA1Update(&sha1_ctx,
746 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
747 		    data->cd_length);
748 		break;
749 	case CRYPTO_DATA_UIO:
750 		ret = sha1_digest_update_uio(&sha1_ctx, data);
751 		break;
752 	case CRYPTO_DATA_MBLK:
753 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
754 		break;
755 	default:
756 		ret = CRYPTO_ARGUMENTS_BAD;
757 	}
758 
759 	if (ret != CRYPTO_SUCCESS) {
760 		/* the update failed, bail */
761 		digest->cd_length = 0;
762 		return (ret);
763 	}
764 
765 	/*
	 * Do a SHA1 final. This must be done separately since the digest
	 * data type can be different from the input data type.
768 	 */
769 	switch (digest->cd_format) {
770 	case CRYPTO_DATA_RAW:
771 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
772 		    digest->cd_offset, &sha1_ctx);
773 		break;
774 	case CRYPTO_DATA_UIO:
775 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
776 		    SHA1_DIGEST_LENGTH, NULL);
777 		break;
778 	case CRYPTO_DATA_MBLK:
779 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
780 		    SHA1_DIGEST_LENGTH, NULL);
781 		break;
782 	default:
783 		ret = CRYPTO_ARGUMENTS_BAD;
784 	}
785 
786 	if (ret == CRYPTO_SUCCESS) {
787 		digest->cd_length = SHA1_DIGEST_LENGTH;
788 	} else {
789 		digest->cd_length = 0;
790 	}
791 
792 	return (ret);
793 }
794 
795 /*
796  * KCF software provider mac entry points.
797  *
798  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
799  *
800  * Init:
801  * The initialization routine initializes what we denote
802  * as the inner and outer contexts by doing
803  * - for inner context: SHA1(key XOR ipad)
804  * - for outer context: SHA1(key XOR opad)
805  *
806  * Update:
807  * Each subsequent SHA1 HMAC update will result in an
808  * update of the inner context with the specified data.
809  *
810  * Final:
811  * The SHA1 HMAC final will do a SHA1 final operation on the
812  * inner context, and the resulting digest will be used
813  * as the data for an update on the outer context. Last
814  * but not least, a SHA1 final on the outer context will
815  * be performed to obtain the SHA1 HMAC digest to return
816  * to the user.
817  */
818 
819 /*
820  * Initialize a SHA1-HMAC context.
821  */
822 static void
823 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
824 {
825 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
826 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
827 	uint_t i;
828 
829 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
830 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
831 
832 	bcopy(keyval, ipad, length_in_bytes);
833 	bcopy(keyval, opad, length_in_bytes);
834 
835 	/* XOR key with ipad (0x36) and opad (0x5c) */
836 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
837 		ipad[i] ^= 0x36363636;
838 		opad[i] ^= 0x5c5c5c5c;
839 	}
840 
841 	/* perform SHA1 on ipad */
842 	SHA1Init(&ctx->hc_icontext);
843 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
844 
845 	/* perform SHA1 on opad */
846 	SHA1Init(&ctx->hc_ocontext);
847 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
848 }
849 
850 /*
851  */
852 static int
853 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
854     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
855     crypto_req_handle_t req)
856 {
857 	int ret = CRYPTO_SUCCESS;
858 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
859 
860 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
861 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
862 		return (CRYPTO_MECHANISM_INVALID);
863 
864 	/* Add support for key by attributes (RFE 4706552) */
865 	if (key->ck_format != CRYPTO_KEY_RAW)
866 		return (CRYPTO_ARGUMENTS_BAD);
867 
868 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
869 	    crypto_kmflag(req));
870 	if (ctx->cc_provider_private == NULL)
871 		return (CRYPTO_HOST_MEMORY);
872 
873 	if (ctx_template != NULL) {
874 		/* reuse context template */
875 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
876 		    sizeof (sha1_hmac_ctx_t));
877 	} else {
878 		/* no context template, compute context */
879 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
880 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
881 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
882 
883 			/*
884 			 * Hash the passed-in key to get a smaller key.
885 			 * The inner context is used since it hasn't been
886 			 * initialized yet.
887 			 */
888 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
889 			    key->ck_data, keylen_in_bytes, digested_key);
890 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
891 			    digested_key, SHA1_DIGEST_LENGTH);
892 		} else {
893 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
894 			    key->ck_data, keylen_in_bytes);
895 		}
896 	}
897 
898 	/*
899 	 * Get the mechanism parameters, if applicable.
900 	 */
901 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
		} else {
			PROV_SHA1_GET_DIGEST_LEN(mechanism,
			    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
			if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
			    SHA1_DIGEST_LENGTH)
				ret = CRYPTO_MECHANISM_PARAM_INVALID;
		}
	}
912 
913 	if (ret != CRYPTO_SUCCESS) {
914 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
915 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
916 		ctx->cc_provider_private = NULL;
917 	}
918 
919 	return (ret);
920 }
921 
922 /* ARGSUSED */
923 static int
924 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
925 {
926 	int ret = CRYPTO_SUCCESS;
927 
928 	ASSERT(ctx->cc_provider_private != NULL);
929 
930 	/*
931 	 * Do a SHA1 update of the inner context using the specified
932 	 * data.
933 	 */
934 	switch (data->cd_format) {
935 	case CRYPTO_DATA_RAW:
936 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
937 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
938 		    data->cd_length);
939 		break;
940 	case CRYPTO_DATA_UIO:
941 		ret = sha1_digest_update_uio(
942 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
943 		break;
944 	case CRYPTO_DATA_MBLK:
945 		ret = sha1_digest_update_mblk(
946 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
947 		break;
948 	default:
949 		ret = CRYPTO_ARGUMENTS_BAD;
950 	}
951 
952 	return (ret);
953 }
954 
955 /* ARGSUSED */
956 static int
957 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
958 {
959 	int ret = CRYPTO_SUCCESS;
960 	uchar_t digest[SHA1_DIGEST_LENGTH];
961 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
962 
963 	ASSERT(ctx->cc_provider_private != NULL);
964 
965 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
966 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
967 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
968 
969 	/*
	 * If the output buffer is too small, just return the length
	 * needed to store the output and do not destroy the context.
972 	 */
973 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
974 		mac->cd_length = digest_len;
975 		return (CRYPTO_BUFFER_TOO_SMALL);
976 	}
977 
978 	/*
979 	 * Do a SHA1 final on the inner context.
980 	 */
981 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
982 
983 	/*
984 	 * Do a SHA1 update on the outer context, feeding the inner
985 	 * digest as data.
986 	 */
987 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
988 	    SHA1_DIGEST_LENGTH);
989 
990 	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the user's buffer.
993 	 */
994 	switch (mac->cd_format) {
995 	case CRYPTO_DATA_RAW:
996 		if (digest_len != SHA1_DIGEST_LENGTH) {
997 			/*
998 			 * The caller requested a short digest. Digest
999 			 * into a scratch buffer and return to
1000 			 * the user only what was requested.
1001 			 */
1002 			SHA1Final(digest,
1003 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1004 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1005 			    mac->cd_offset, digest_len);
1006 		} else {
1007 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1008 			    mac->cd_offset,
1009 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1010 		}
1011 		break;
1012 	case CRYPTO_DATA_UIO:
1013 		ret = sha1_digest_final_uio(
1014 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1015 		    digest_len, digest);
1016 		break;
1017 	case CRYPTO_DATA_MBLK:
1018 		ret = sha1_digest_final_mblk(
1019 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1020 		    digest_len, digest);
1021 		break;
1022 	default:
1023 		ret = CRYPTO_ARGUMENTS_BAD;
1024 	}
1025 
1026 	if (ret == CRYPTO_SUCCESS) {
1027 		mac->cd_length = digest_len;
1028 	} else {
1029 		mac->cd_length = 0;
1030 	}
1031 
1032 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1033 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1034 	ctx->cc_provider_private = NULL;
1035 
1036 	return (ret);
1037 }
1038 
1039 #define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
1040 	switch (data->cd_format) {					\
1041 	case CRYPTO_DATA_RAW:						\
1042 		SHA1Update(&(ctx).hc_icontext,				\
1043 		    (uint8_t *)data->cd_raw.iov_base +			\
1044 		    data->cd_offset, data->cd_length);			\
1045 		break;							\
1046 	case CRYPTO_DATA_UIO:						\
1047 		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1048 		break;							\
1049 	case CRYPTO_DATA_MBLK:						\
1050 		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
1051 		    data);						\
1052 		break;							\
1053 	default:							\
1054 		ret = CRYPTO_ARGUMENTS_BAD;				\
1055 	}								\
1056 }
1057 
1058 /* ARGSUSED */
1059 static int
1060 sha1_mac_atomic(crypto_provider_handle_t provider,
1061     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1062     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1063     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1064 {
1065 	int ret = CRYPTO_SUCCESS;
1066 	uchar_t digest[SHA1_DIGEST_LENGTH];
1067 	sha1_hmac_ctx_t sha1_hmac_ctx;
1068 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1069 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1070 
1071 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1072 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1073 		return (CRYPTO_MECHANISM_INVALID);
1074 
1075 	/* Add support for key by attributes (RFE 4706552) */
1076 	if (key->ck_format != CRYPTO_KEY_RAW)
1077 		return (CRYPTO_ARGUMENTS_BAD);
1078 
1079 	if (ctx_template != NULL) {
1080 		/* reuse context template */
1081 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1082 	} else {
1083 		/* no context template, initialize context */
1084 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1085 			/*
1086 			 * Hash the passed-in key to get a smaller key.
1087 			 * The inner context is used since it hasn't been
1088 			 * initialized yet.
1089 			 */
1090 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1091 			    key->ck_data, keylen_in_bytes, digest);
1092 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1093 			    SHA1_DIGEST_LENGTH);
1094 		} else {
1095 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1096 			    keylen_in_bytes);
1097 		}
1098 	}
1099 
1100 	/* get the mechanism parameters, if applicable */
1101 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1102 		if (mechanism->cm_param == NULL ||
1103 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1104 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1105 			goto bail;
1106 		}
1107 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1108 		if (digest_len > SHA1_DIGEST_LENGTH) {
1109 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1110 			goto bail;
1111 		}
1112 	}
1113 
1114 	/* do a SHA1 update of the inner context using the specified data */
1115 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1116 	if (ret != CRYPTO_SUCCESS)
		/* the update failed, zeroize the stack context and bail */
1118 		goto bail;
1119 
1120 	/*
1121 	 * Do a SHA1 final on the inner context.
1122 	 */
1123 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1124 
1125 	/*
	 * Do a SHA1 update on the outer context, feeding the inner
1127 	 * digest as data.
1128 	 */
1129 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1130 
1131 	/*
1132 	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the user's buffer.
1134 	 */
1135 	switch (mac->cd_format) {
1136 	case CRYPTO_DATA_RAW:
1137 		if (digest_len != SHA1_DIGEST_LENGTH) {
1138 			/*
1139 			 * The caller requested a short digest. Digest
1140 			 * into a scratch buffer and return to
1141 			 * the user only what was requested.
1142 			 */
1143 			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1144 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1145 			    mac->cd_offset, digest_len);
1146 		} else {
1147 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1148 			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1149 		}
1150 		break;
1151 	case CRYPTO_DATA_UIO:
1152 		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1153 		    digest_len, digest);
1154 		break;
1155 	case CRYPTO_DATA_MBLK:
1156 		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1157 		    digest_len, digest);
1158 		break;
1159 	default:
1160 		ret = CRYPTO_ARGUMENTS_BAD;
1161 	}
1162 
1163 	if (ret == CRYPTO_SUCCESS) {
1164 		mac->cd_length = digest_len;
1165 	} else {
1166 		mac->cd_length = 0;
1167 	}
1168 	/* Extra paranoia: zeroize the context on the stack */
1169 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1170 
1171 	return (ret);
1172 bail:
1173 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1174 	mac->cd_length = 0;
1175 	return (ret);
1176 }
1177 
1178 /* ARGSUSED */
1179 static int
1180 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1181     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1182     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1183     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1184 {
1185 	int ret = CRYPTO_SUCCESS;
1186 	uchar_t digest[SHA1_DIGEST_LENGTH];
1187 	sha1_hmac_ctx_t sha1_hmac_ctx;
1188 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1189 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1190 
1191 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1192 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1193 		return (CRYPTO_MECHANISM_INVALID);
1194 
1195 	/* Add support for key by attributes (RFE 4706552) */
1196 	if (key->ck_format != CRYPTO_KEY_RAW)
1197 		return (CRYPTO_ARGUMENTS_BAD);
1198 
1199 	if (ctx_template != NULL) {
1200 		/* reuse context template */
1201 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1202 	} else {
1203 		/* no context template, initialize context */
1204 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1205 			/*
1206 			 * Hash the passed-in key to get a smaller key.
1207 			 * The inner context is used since it hasn't been
1208 			 * initialized yet.
1209 			 */
1210 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1211 			    key->ck_data, keylen_in_bytes, digest);
1212 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1213 			    SHA1_DIGEST_LENGTH);
1214 		} else {
1215 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1216 			    keylen_in_bytes);
1217 		}
1218 	}
1219 
1220 	/* get the mechanism parameters, if applicable */
1221 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1222 		if (mechanism->cm_param == NULL ||
1223 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1224 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1225 			goto bail;
1226 		}
1227 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1228 		if (digest_len > SHA1_DIGEST_LENGTH) {
1229 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1230 			goto bail;
1231 		}
1232 	}
1233 
1234 	if (mac->cd_length != digest_len) {
1235 		ret = CRYPTO_INVALID_MAC;
1236 		goto bail;
1237 	}
1238 
1239 	/* do a SHA1 update of the inner context using the specified data */
1240 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1241 	if (ret != CRYPTO_SUCCESS)
		/* the update failed, zeroize the stack context and bail */
1243 		goto bail;
1244 
1245 	/* do a SHA1 final on the inner context */
1246 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1247 
1248 	/*
	 * Do a SHA1 update on the outer context, feeding the inner
1250 	 * digest as data.
1251 	 */
1252 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1253 
1254 	/*
1255 	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the user's buffer.
1257 	 */
1258 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1259 
1260 	/*
1261 	 * Compare the computed digest against the expected digest passed
1262 	 * as argument.
1263 	 */
1264 
1265 	switch (mac->cd_format) {
1266 
1267 	case CRYPTO_DATA_RAW:
1268 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1269 		    mac->cd_offset, digest_len) != 0)
1270 			ret = CRYPTO_INVALID_MAC;
1271 		break;
1272 
1273 	case CRYPTO_DATA_UIO: {
1274 		off_t offset = mac->cd_offset;
1275 		uint_t vec_idx;
1276 		off_t scratch_offset = 0;
1277 		size_t length = digest_len;
1278 		size_t cur_len;
1279 
		/* we support only kernel buffer */
		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE) {
			ret = CRYPTO_ARGUMENTS_BAD;
			break;
		}

		/* jump to the first iovec containing the expected digest */
		for (vec_idx = 0;
		    vec_idx < mac->cd_uio->uio_iovcnt &&
		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
			;
1290 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
1291 			/*
1292 			 * The caller specified an offset that is
1293 			 * larger than the total size of the buffers
1294 			 * it provided.
1295 			 */
1296 			ret = CRYPTO_DATA_LEN_RANGE;
1297 			break;
1298 		}
1299 
1300 		/* do the comparison of computed digest vs specified one */
1301 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1302 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1303 			    offset, length);
1304 
1305 			if (bcmp(digest + scratch_offset,
1306 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1307 			    cur_len) != 0) {
1308 				ret = CRYPTO_INVALID_MAC;
1309 				break;
1310 			}
1311 
1312 			length -= cur_len;
1313 			vec_idx++;
1314 			scratch_offset += cur_len;
1315 			offset = 0;
1316 		}
1317 		break;
1318 	}
1319 
1320 	case CRYPTO_DATA_MBLK: {
1321 		off_t offset = mac->cd_offset;
1322 		mblk_t *mp;
1323 		off_t scratch_offset = 0;
1324 		size_t length = digest_len;
1325 		size_t cur_len;
1326 
1327 		/* jump to the first mblk_t containing the expected digest */
1328 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1329 		    offset -= MBLKL(mp), mp = mp->b_cont)
1330 			;
1331 		if (mp == NULL) {
1332 			/*
1333 			 * The caller specified an offset that is larger than
1334 			 * the total size of the buffers it provided.
1335 			 */
1336 			ret = CRYPTO_DATA_LEN_RANGE;
1337 			break;
1338 		}
1339 
1340 		while (mp != NULL && length > 0) {
1341 			cur_len = MIN(MBLKL(mp) - offset, length);
1342 			if (bcmp(digest + scratch_offset,
1343 			    mp->b_rptr + offset, cur_len) != 0) {
1344 				ret = CRYPTO_INVALID_MAC;
1345 				break;
1346 			}
1347 
1348 			length -= cur_len;
1349 			mp = mp->b_cont;
1350 			scratch_offset += cur_len;
1351 			offset = 0;
1352 		}
1353 		break;
1354 	}
1355 
1356 	default:
1357 		ret = CRYPTO_ARGUMENTS_BAD;
1358 	}
1359 
1360 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1361 	return (ret);
1362 bail:
1363 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1364 	mac->cd_length = 0;
1365 	return (ret);
1366 }
1367 
1368 /*
1369  * KCF software provider context management entry points.
1370  */
1371 
1372 /* ARGSUSED */
1373 static int
1374 sha1_create_ctx_template(crypto_provider_handle_t provider,
1375     crypto_mechanism_t *mechanism, crypto_key_t *key,
1376     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1377     crypto_req_handle_t req)
1378 {
1379 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1380 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1381 
1382 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1383 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1384 		return (CRYPTO_MECHANISM_INVALID);
1385 	}
1386 
1387 	/* Add support for key by attributes (RFE 4706552) */
1388 	if (key->ck_format != CRYPTO_KEY_RAW)
1389 		return (CRYPTO_ARGUMENTS_BAD);
1390 
1391 	/*
1392 	 * Allocate and initialize SHA1 context.
1393 	 */
1394 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1395 	    crypto_kmflag(req));
1396 	if (sha1_hmac_ctx_tmpl == NULL)
1397 		return (CRYPTO_HOST_MEMORY);
1398 
1399 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1400 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
1401 
1402 		/*
1403 		 * Hash the passed-in key to get a smaller key.
1404 		 * The inner context is used since it hasn't been
1405 		 * initialized yet.
1406 		 */
1407 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1408 		    key->ck_data, keylen_in_bytes, digested_key);
1409 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1410 		    SHA1_DIGEST_LENGTH);
1411 	} else {
1412 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1413 		    keylen_in_bytes);
1414 	}
1415 
1416 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1417 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
	*ctx_template_size = sizeof (sha1_hmac_ctx_t);

1421 	return (CRYPTO_SUCCESS);
1422 }
1423 
1424 static int
1425 sha1_free_context(crypto_ctx_t *ctx)
1426 {
1427 	uint_t ctx_len;
1428 	sha1_mech_type_t mech_type;
1429 
1430 	if (ctx->cc_provider_private == NULL)
1431 		return (CRYPTO_SUCCESS);
1432 
1433 	/*
1434 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
1435 	 * have different lengths.
1436 	 */
1437 
1438 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1439 	if (mech_type == SHA1_MECH_INFO_TYPE)
1440 		ctx_len = sizeof (sha1_ctx_t);
1441 	else {
1442 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1443 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1444 		ctx_len = sizeof (sha1_hmac_ctx_t);
1445 	}
1446 
1447 	bzero(ctx->cc_provider_private, ctx_len);
1448 	kmem_free(ctx->cc_provider_private, ctx_len);
1449 	ctx->cc_provider_private = NULL;
1450 
1451 	return (CRYPTO_SUCCESS);
1452 }
1453 
1454 /*
1455  * SHA-1 Power-Up Self-Test
1456  */
static void
sha1_POST(int *rc)
{
	*rc = fips_sha1_post();
}
1464