1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2018, Joyent, Inc.
26  * Copyright 2017 Jason King.
27  */
28 
29 #include <pthread.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <strings.h>
33 #include <sys/debug.h>
34 #include <sys/types.h>
35 #include <security/cryptoki.h>
36 #include <aes_impl.h>
37 #include <cryptoutil.h>
38 #include "softSession.h"
39 #include "softObject.h"
40 #include "softCrypt.h"
41 #include "softOps.h"
42 
43 /*
44  * Check that the mechanism parameter is present and the correct size if
45  * required and allocate an AES context.
46  */
47 static CK_RV
48 soft_aes_check_mech_param(CK_MECHANISM_PTR mech, aes_ctx_t **ctxp)
49 {
50 	void *(*allocf)(int) = NULL;
51 	size_t param_len = 0;
52 	boolean_t param_req = B_TRUE;
53 
54 	switch (mech->mechanism) {
55 	case CKM_AES_ECB:
56 		param_req = B_FALSE;
57 		allocf = ecb_alloc_ctx;
58 		break;
59 	case CKM_AES_CMAC:
60 		param_req = B_FALSE;
61 		allocf = cmac_alloc_ctx;
62 		break;
63 	case CKM_AES_CMAC_GENERAL:
64 		param_len = sizeof (CK_MAC_GENERAL_PARAMS);
65 		allocf = cmac_alloc_ctx;
66 		break;
67 	case CKM_AES_CBC:
68 	case CKM_AES_CBC_PAD:
69 		param_len = AES_BLOCK_LEN;
70 		allocf = cbc_alloc_ctx;
71 		break;
72 	case CKM_AES_CTR:
73 		param_len = sizeof (CK_AES_CTR_PARAMS);
74 		allocf = ctr_alloc_ctx;
75 		break;
76 	case CKM_AES_CCM:
77 		param_len = sizeof (CK_CCM_PARAMS);
78 		allocf = ccm_alloc_ctx;
79 		break;
80 	case CKM_AES_GCM:
81 		param_len = sizeof (CK_GCM_PARAMS);
82 		allocf = gcm_alloc_ctx;
83 		break;
84 	default:
85 		return (CKR_MECHANISM_INVALID);
86 	}
87 
88 	if (param_req && (mech->pParameter == NULL ||
89 	    mech->ulParameterLen != param_len)) {
90 		return (CKR_MECHANISM_PARAM_INVALID);
91 	}
92 
93 	*ctxp = allocf(0);
94 	if (*ctxp == NULL) {
95 		return (CKR_HOST_MEMORY);
96 	}
97 
98 	return (CKR_OK);
99 }
100 
101 /*
102  * Create an AES key schedule for the given AES context from the given key.
103  * If the key is not sensitive, cache a copy of the key schedule in the
104  * key object and/or use the cached copy of the key schedule.
105  *
106  * Must be called before the init function for a given mode is called.
107  */
108 static CK_RV
109 soft_aes_init_key(aes_ctx_t *aes_ctx, soft_object_t *key_p)
110 {
111 	void *ks = NULL;
112 	size_t size = 0;
113 	CK_RV rv = CKR_OK;
114 
115 	(void) pthread_mutex_lock(&key_p->object_mutex);
116 
117 	/*
118 	 * AES keys should be either 128, 192, or 256 bits long.
119 	 * soft_object_t stores the key size in bytes, so we check those sizes
120 	 * in bytes.
121 	 *
122 	 * While soft_build_secret_key_object() does these same validations for
123 	 * keys created by the user, it may be possible that a key loaded from
124 	 * disk could be invalid or corrupt.  We err on the side of caution
125 	 * and check again that it's the correct size before performing any
126 	 * AES operations.
127 	 */
128 	switch (OBJ_SEC_VALUE_LEN(key_p)) {
129 	case AES_MIN_KEY_BYTES:
130 	case AES_MAX_KEY_BYTES:
131 	case AES_192_KEY_BYTES:
132 		break;
133 	default:
134 		rv = CKR_KEY_SIZE_RANGE;
135 		goto done;
136 	}
137 
138 	ks = aes_alloc_keysched(&size, 0);
139 	if (ks == NULL) {
140 		rv = CKR_HOST_MEMORY;
141 		goto done;
142 	}
143 
144 	/* If this is a sensitive key, always expand the key schedule */
145 	if (key_p->bool_attr_mask & SENSITIVE_BOOL_ON) {
146 		/* aes_init_keysched() requires key length in bits.  */
147 #ifdef	__sparcv9
148 		/* LINTED */
149 		aes_init_keysched(OBJ_SEC_VALUE(key_p), (uint_t)
150 		    (OBJ_SEC_VALUE_LEN(key_p) * NBBY), ks);
151 #else	/* !__sparcv9 */
152 		aes_init_keysched(OBJ_SEC_VALUE(key_p),
153 		    (OBJ_SEC_VALUE_LEN(key_p) * NBBY), ks);
154 #endif	/* __sparcv9 */
155 
156 		goto done;
157 	}
158 
159 	/* If a non-sensitive key and doesn't have a key schedule, create it */
160 	if (OBJ_KEY_SCHED(key_p) == NULL) {
161 		void *obj_ks = NULL;
162 
163 		obj_ks = aes_alloc_keysched(&size, 0);
164 		if (obj_ks == NULL) {
165 			rv = CKR_HOST_MEMORY;
166 			goto done;
167 		}
168 
169 #ifdef	__sparcv9
170 		/* LINTED */
171 		aes_init_keysched(OBJ_SEC_VALUE(key_p),
172 		    (uint_t)(OBJ_SEC_VALUE_LEN(key_p) * 8), obj_ks);
173 #else	/* !__sparcv9 */
174 		aes_init_keysched(OBJ_SEC_VALUE(key_p),
175 		    (OBJ_SEC_VALUE_LEN(key_p) * 8), obj_ks);
176 #endif	/* __sparcv9 */
177 
178 		OBJ_KEY_SCHED_LEN(key_p) = size;
179 		OBJ_KEY_SCHED(key_p) = obj_ks;
180 	}
181 
182 	(void) memcpy(ks, OBJ_KEY_SCHED(key_p), OBJ_KEY_SCHED_LEN(key_p));
183 
184 done:
185 	(void) pthread_mutex_unlock(&key_p->object_mutex);
186 
187 	if (rv == CKR_OK) {
188 		aes_ctx->ac_keysched = ks;
189 		aes_ctx->ac_keysched_len = size;
190 	} else {
191 		freezero(ks, size);
192 	}
193 
194 	return (rv);
195 }
196 
197 /*
198  * Initialize the AES context for the given mode, including allocating and
199  * expanding the key schedule if required.
200  */
201 static CK_RV
202 soft_aes_init_ctx(aes_ctx_t *aes_ctx, CK_MECHANISM_PTR mech_p,
203     boolean_t encrypt)
204 {
205 	int rc = CRYPTO_SUCCESS;
206 
207 	switch (mech_p->mechanism) {
208 	case CKM_AES_ECB:
209 		aes_ctx->ac_flags |= ECB_MODE;
210 		break;
211 	case CKM_AES_CMAC:
212 	case CKM_AES_CMAC_GENERAL:
213 		rc = cmac_init_ctx((cbc_ctx_t *)aes_ctx, AES_BLOCK_LEN);
214 		break;
215 	case CKM_AES_CBC:
216 	case CKM_AES_CBC_PAD:
217 		rc = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mech_p->pParameter,
218 		    mech_p->ulParameterLen, AES_BLOCK_LEN, aes_copy_block64);
219 		break;
220 	case CKM_AES_CTR:
221 	{
222 		/*
223 		 * soft_aes_check_param() verifies this is !NULL and is the
224 		 * correct size.
225 		 */
226 		CK_AES_CTR_PARAMS *pp = (CK_AES_CTR_PARAMS *)mech_p->pParameter;
227 
228 		rc = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
229 		    pp->cb, aes_copy_block);
230 		break;
231 	}
232 	case CKM_AES_CCM: {
233 		CK_CCM_PARAMS *pp = (CK_CCM_PARAMS *)mech_p->pParameter;
234 
235 		/*
236 		 * The illumos ccm mode implementation predates the PKCS#11
237 		 * version that specifies CK_CCM_PARAMS.  As a result, the order
238 		 * and names of the struct members are different, so we must
239 		 * translate.  ccm_init_ctx() does not store a ref ccm_params,
240 		 * so it is safe to allocate on the stack.
241 		 */
242 		CK_AES_CCM_PARAMS ccm_params = {
243 			.ulMACSize = pp->ulMACLen,
244 			.ulNonceSize = pp->ulNonceLen,
245 			.ulAuthDataSize = pp->ulAADLen,
246 			.ulDataSize = pp->ulDataLen,
247 			.nonce = pp->pNonce,
248 			.authData = pp->pAAD
249 		};
250 
251 		rc = ccm_init_ctx((ccm_ctx_t *)aes_ctx, (char *)&ccm_params, 0,
252 		    encrypt, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
253 		break;
254 	}
255 	case CKM_AES_GCM:
256 		/*
257 		 * Similar to the ccm mode implementation, the gcm mode also
258 		 * predates PKCS#11 2.40, however in this instance
259 		 * CK_AES_GCM_PARAMS and CK_GCM_PARAMS are identical except
260 		 * for the member names, so we can just pass it along.
261 		 */
262 		rc = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mech_p->pParameter,
263 		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
264 		    aes_xor_block);
265 		break;
266 	}
267 
268 	return (crypto2pkcs11_error_number(rc));
269 }
270 
271 /*
272  * Allocate context for the active encryption or decryption operation, and
273  * generate AES key schedule to speed up the operation.
274  */
275 CK_RV
276 soft_aes_crypt_init_common(soft_session_t *session_p,
277     CK_MECHANISM_PTR pMechanism, soft_object_t *key_p,
278     boolean_t encrypt)
279 {
280 	aes_ctx_t *aes_ctx = NULL;
281 	CK_RV rv = CKR_OK;
282 
283 	if (key_p->key_type != CKK_AES)
284 		return (CKR_KEY_TYPE_INCONSISTENT);
285 
286 	/* C_{Encrypt,Decrypt}Init() validate pMechanism != NULL */
287 	rv = soft_aes_check_mech_param(pMechanism, &aes_ctx);
288 	if (rv != CKR_OK) {
289 		goto done;
290 	}
291 
292 	rv = soft_aes_init_key(aes_ctx, key_p);
293 	if (rv != CKR_OK) {
294 		goto done;
295 	}
296 
297 	rv = soft_aes_init_ctx(aes_ctx, pMechanism, encrypt);
298 	if (rv != CKR_OK) {
299 		goto done;
300 	}
301 
302 	(void) pthread_mutex_lock(&session_p->session_mutex);
303 	if (encrypt) {
304 		/* Called by C_EncryptInit. */
305 		session_p->encrypt.context = aes_ctx;
306 		session_p->encrypt.mech.mechanism = pMechanism->mechanism;
307 	} else {
308 		/* Called by C_DecryptInit. */
309 		session_p->decrypt.context = aes_ctx;
310 		session_p->decrypt.mech.mechanism = pMechanism->mechanism;
311 	}
312 	(void) pthread_mutex_unlock(&session_p->session_mutex);
313 
314 done:
315 	if (rv != CKR_OK) {
316 		soft_aes_free_ctx(aes_ctx);
317 	}
318 
319 	return (rv);
320 }
321 
322 
/*
 * Single-part encrypt entry point (C_Encrypt).  Encrypts ulDataLen bytes of
 * pData into pEncryptedData using the session's active encrypt context,
 * then terminates the operation (the context is freed on every path that
 * reaches cleanup; only the size-query and buffer-too-small returns leave
 * the operation active, per the usual PKCS#11 convention).
 */
CK_RV
soft_aes_encrypt(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pEncryptedData,
    CK_ULONG_PTR pulEncryptedDataLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	size_t length_needed;
	size_t remainder;
	int rc = CRYPTO_SUCCESS;
	CK_RV rv = CKR_OK;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulEncryptedDataLen,
		.cd_raw.iov_base = (char *)pEncryptedData,
		.cd_raw.iov_len = *pulEncryptedDataLen
	};

	/*
	 * A bit unusual, but it's permissible for ccm and gcm modes to not
	 * encrypt any data.  This ends up being equivalent to CKM_AES_CMAC
	 * or CKM_AES_GMAC of the additional authenticated data (AAD).
	 */
	if ((pData == NULL || ulDataLen == 0) &&
	    !(aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|CMAC_MODE))) {
		return (CKR_ARGUMENTS_BAD);
	}

	/* AES_BLOCK_LEN is a power of two, so this is ulDataLen % block. */
	remainder = ulDataLen & (AES_BLOCK_LEN - 1);

	/*
	 * CTR, CCM, CMAC, and GCM modes do not require the plaintext
	 * to be a multiple of the AES block size. CKM_AES_CBC_PAD as the
	 * name suggests pads it's output, so it can also accept any
	 * size plaintext.
	 */
	switch (mech) {
	case CKM_AES_CBC_PAD:
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_CTR:
	case CKM_AES_CCM:
	case CKM_AES_GCM:
		break;
	default:
		if (remainder != 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto cleanup;
		}
	}

	switch (aes_ctx->ac_flags & (CMAC_MODE|CCM_MODE|GCM_MODE)) {
	case CCM_MODE:
		/* Ciphertext plus the appended MAC. */
		length_needed = ulDataLen + aes_ctx->ac_mac_len;
		break;
	case GCM_MODE:
		/* Ciphertext plus the appended tag. */
		length_needed = ulDataLen + aes_ctx->ac_tag_len;
		break;
	case CMAC_MODE:
		/* The output is a single MAC block; no ciphertext. */
		length_needed = AES_BLOCK_LEN;
		break;
	default:
		length_needed = ulDataLen;

		/* CKM_AES_CBC_PAD out pads to a multiple of AES_BLOCK_LEN */
		if (mech == CKM_AES_CBC_PAD) {
			length_needed += AES_BLOCK_LEN - remainder;
		}
	}

	if (pEncryptedData == NULL) {
		/*
		 * The application can ask for the size of the output buffer
		 * with a NULL output buffer (pEncryptedData).
		 * C_Encrypt() guarantees pulEncryptedDataLen != NULL.
		 */
		*pulEncryptedDataLen = length_needed;
		return (CKR_OK);
	}

	if (*pulEncryptedDataLen < length_needed) {
		*pulEncryptedDataLen = length_needed;
		return (CKR_BUFFER_TOO_SMALL);
	}

	if (ulDataLen > 0) {
		rv = soft_aes_encrypt_update(session_p, pData, ulDataLen,
		    pEncryptedData, pulEncryptedDataLen);

		if (rv != CKR_OK) {
			rv = CKR_FUNCTION_FAILED;
			goto cleanup;
		}

		/*
		 * Some modes (e.g. CCM and GCM) will append data such as a MAC
		 * to the ciphertext after the plaintext has been encrypted.
		 * Update out to reflect the amount of data in pEncryptedData
		 * after encryption.
		 */
		out.cd_offset = *pulEncryptedDataLen;
	}

	/*
	 * As CKM_AES_CTR is a stream cipher, ctr_mode_final is always
	 * invoked in the _update() functions, so we do not need to call it
	 * here.
	 */
	if (mech == CKM_AES_CBC_PAD) {
		/*
		 * aes_encrypt_contiguous_blocks() accumulates plaintext
		 * in aes_ctx and then encrypts once it has accumulated
		 * a multiple of AES_BLOCK_LEN bytes of plaintext (through one
		 * or more calls).  Any leftover plaintext is left in aes_ctx
		 * for subsequent calls.  If there is any remaining plaintext
		 * at the end, we pad it out to to AES_BLOCK_LEN using the
		 * amount of padding to add as the value of the pad bytes
		 * (i.e. PKCS#7 padding) and call
		 * aes_encrypt_contiguous_blocks() one last time.
		 *
		 * Even when the input is already a multiple of AES_BLOCK_LEN,
		 * we must add an additional full block so that we can determine
		 * the amount of padding to remove during decryption (by
		 * examining the last byte of the decrypted ciphertext).
		 */
		size_t amt = AES_BLOCK_LEN - remainder;
		char block[AES_BLOCK_LEN];

		ASSERT3U(remainder, ==, aes_ctx->ac_remainder_len);
		ASSERT3U(amt + remainder, ==, AES_BLOCK_LEN);

		/*
		 * The existing soft_add_pkcs7_padding() interface is
		 * overkill for what is effectively a memset().  A better
		 * RFE would be to create a CBC_PAD mode.
		 */
		(void) memset(block, amt & 0xff, sizeof (block));
		rc = aes_encrypt_contiguous_blocks(aes_ctx, block, amt, &out);
	} else if (aes_ctx->ac_flags & CCM_MODE) {
		rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
	} else if (aes_ctx->ac_flags & GCM_MODE) {
		rc = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
	} else if (aes_ctx->ac_flags & CMAC_MODE) {
		rc = cmac_mode_final((cbc_ctx_t *)aes_ctx, &out,
		    aes_encrypt_block, aes_xor_block);
		aes_ctx->ac_remainder_len = 0;
	}

cleanup:
	/* On failure, report no output and map the raw error to PKCS#11. */
	if (rc != CRYPTO_SUCCESS && rv == CKR_OK) {
		*pulEncryptedDataLen = 0;
		rv = crypto2pkcs11_error_number(rc);
	}

	/* The operation is terminated here whether it succeeded or not. */
	(void) pthread_mutex_lock(&session_p->session_mutex);
	soft_aes_free_ctx(aes_ctx);
	session_p->encrypt.context = NULL;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	if (rv == CKR_OK) {
		*pulEncryptedDataLen = out.cd_offset;
	}

	return (rv);
}
492 
/*
 * Single-part decrypt entry point (C_Decrypt).  Decrypts ulEncryptedDataLen
 * bytes of pEncryptedData into pData using the session's active decrypt
 * context, then terminates the operation (the context is always freed
 * before returning; only the size-query and buffer-too-small returns leave
 * the operation active, per the usual PKCS#11 convention).
 */
CK_RV
soft_aes_decrypt(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	size_t length_needed;
	size_t remainder;
	int rc = CRYPTO_SUCCESS;
	CK_RV rv = CKR_OK;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulDataLen,
		.cd_raw.iov_base = (char *)pData,
		.cd_raw.iov_len = *pulDataLen
	};

	/*
	 * A bit unusual, but it's permissible for ccm and gcm modes to not
	 * decrypt any data.  This ends up being equivalent to CKM_AES_CMAC
	 * or CKM_AES_GMAC of the additional authenticated data (AAD).
	 */
	if ((pEncryptedData == NULL || ulEncryptedDataLen == 0) &&
	    !(aes_ctx->ac_flags & (CCM_MODE|GCM_MODE))) {
		return (CKR_ARGUMENTS_BAD);
	}

	/* AES_BLOCK_LEN is a power of two, so this is len % block. */
	remainder = ulEncryptedDataLen & (AES_BLOCK_LEN - 1);

	/*
	 * CTR, CCM, CMAC, and GCM modes do not require the ciphertext
	 * to be a multiple of the AES block size.  Note that while
	 * CKM_AES_CBC_PAD accepts an arbitrary sized plaintext, the
	 * ciphertext is always a multiple of the AES block size
	 */
	switch (mech) {
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_CTR:
	case CKM_AES_CCM:
	case CKM_AES_GCM:
		break;
	default:
		if (remainder != 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto cleanup;
		}
	}

	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
	case CCM_MODE:
		length_needed = aes_ctx->ac_processed_data_len;
		break;
	case GCM_MODE:
		/* The trailing tag is consumed, not emitted as plaintext. */
		length_needed = ulEncryptedDataLen - aes_ctx->ac_tag_len;
		break;
	default:
		/*
		 * Note: for CKM_AES_CBC_PAD, we cannot know exactly how much
		 * space is needed for the plaintext until after we decrypt it.
		 * However, it is permissible to return a value 'somewhat'
		 * larger than necessary (PKCS#11 Base Specification, sec 5.2).
		 *
		 * Since CKM_AES_CBC_PAD adds at most AES_BLOCK_LEN bytes to
		 * the plaintext, we report the ciphertext length as the
		 * required plaintext length.  This means we specify at most
		 * AES_BLOCK_LEN additional bytes of memory for the plaintext.
		 *
		 * This behavior is slightly different from the earlier
		 * version of this code which returned the value of
		 * (ulEncryptedDataLen - AES_BLOCK_LEN), which was only ever
		 * correct when the original plaintext was already a multiple
		 * of AES_BLOCK_LEN (i.e. when AES_BLOCK_LEN of padding was
		 * added).  This should not be a concern for existing
		 * consumers -- if they were previously using the value of
		 * *pulDataLen to size the outbut buffer, the resulting
		 * plaintext would be truncated anytime the original plaintext
		 * wasn't a multiple of AES_BLOCK_LEN.  No consumer should
		 * be relying on such wrong behavior.  More likely they are
		 * using the size of the ciphertext or larger for the
		 * buffer to hold the decrypted plaintext (which is always
		 * acceptable).
		 */
		length_needed = ulEncryptedDataLen;
	}

	if (pData == NULL) {
		/*
		 * The application can ask for the size of the output buffer
		 * with a NULL output buffer (pData).
		 * C_Decrypt() guarantees pulDataLen != NULL.
		 */
		*pulDataLen = length_needed;
		return (CKR_OK);
	}

	if (*pulDataLen < length_needed) {
		*pulDataLen = length_needed;
		return (CKR_BUFFER_TOO_SMALL);
	}

	if (ulEncryptedDataLen > 0) {
		rv = soft_aes_decrypt_update(session_p, pEncryptedData,
		    ulEncryptedDataLen, pData, pulDataLen);
	}

	if (rv != CKR_OK) {
		rv = CKR_FUNCTION_FAILED;
		goto cleanup;
	}

	/*
	 * Some modes (e.g. CCM and GCM) will output additional data
	 * after the plaintext (such as the MAC).  Update out to
	 * reflect the amount of data in pData for the _final() functions.
	 */
	out.cd_offset = *pulDataLen;

	/*
	 * As CKM_AES_CTR is a stream cipher, ctr_mode_final is always
	 * invoked in the _update() functions, so we do not need to call it
	 * here.
	 */
	if (mech == CKM_AES_CBC_PAD) {
		/* Strip the PKCS#7 padding from the decrypted plaintext. */
		rv = soft_remove_pkcs7_padding(pData, *pulDataLen, pulDataLen);
	} else if (aes_ctx->ac_flags & CCM_MODE) {
		ASSERT3U(aes_ctx->ac_processed_data_len, ==,
		    aes_ctx->ac_data_len);
		ASSERT3U(aes_ctx->ac_processed_mac_len, ==,
		    aes_ctx->ac_mac_len);

		rc = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
	} else if (aes_ctx->ac_flags & GCM_MODE) {
		rc = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
	}

cleanup:
	/* On failure, report no output and map the raw error to PKCS#11. */
	if (rc != CRYPTO_SUCCESS && rv == CKR_OK) {
		rv = crypto2pkcs11_error_number(rc);
		*pulDataLen = 0;
	}

	if (rv == CKR_OK) {
		*pulDataLen = out.cd_offset;
	}

	/* The operation is terminated here whether it succeeded or not. */
	(void) pthread_mutex_lock(&session_p->session_mutex);
	soft_aes_free_ctx(aes_ctx);
	session_p->decrypt.context = NULL;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	return (rv);
}
650 
651 CK_RV
652 soft_aes_encrypt_update(soft_session_t *session_p, CK_BYTE_PTR pData,
653     CK_ULONG ulDataLen, CK_BYTE_PTR pEncryptedData,
654     CK_ULONG_PTR pulEncryptedDataLen)
655 {
656 	aes_ctx_t *aes_ctx = session_p->encrypt.context;
657 	crypto_data_t out = {
658 		.cd_format = CRYPTO_DATA_RAW,
659 		.cd_offset = 0,
660 		.cd_length = *pulEncryptedDataLen,
661 		.cd_raw.iov_base = (char *)pEncryptedData,
662 		.cd_raw.iov_len = *pulEncryptedDataLen
663 	};
664 	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
665 	CK_RV rv = CKR_OK;
666 	size_t out_len = aes_ctx->ac_remainder_len + ulDataLen;
667 	int rc;
668 
669 	/* Check size of the output buffer */
670 	if (mech == CKM_AES_CBC_PAD && (out_len <= AES_BLOCK_LEN)) {
671 		/*
672 		 * Since there is currently no CBC_PAD mode, we must stash any
673 		 * remainder ourselves.  For all other modes,
674 		 * aes_encrypt_contiguous_blocks() will call the mode specific
675 		 * encrypt function and will stash any reminder if required.
676 		 */
677 		if (pData != NULL) {
678 			uint8_t *dest = (uint8_t *)aes_ctx->ac_remainder +
679 			    aes_ctx->ac_remainder_len;
680 
681 			bcopy(pData, dest, ulDataLen);
682 			aes_ctx->ac_remainder_len += ulDataLen;
683 		}
684 
685 		*pulEncryptedDataLen = 0;
686 		return (CKR_OK);
687 	} else if (aes_ctx->ac_flags & CMAC_MODE) {
688 		/*
689 		 * The underlying CMAC implementation handles the storing of
690 		 * extra bytes and does not output any data until *_final,
691 		 * so do not bother looking at the size of the output
692 		 * buffer at this time.
693 		 */
694 		if (pData == NULL) {
695 			*pulEncryptedDataLen = 0;
696 			return (CKR_OK);
697 		}
698 	} else {
699 		/*
700 		 * The number of complete blocks we can encrypt right now.
701 		 * The underlying implementation will buffer any remaining data
702 		 * until the next *_update call.
703 		 */
704 		out_len &= ~(AES_BLOCK_LEN - 1);
705 
706 		if (pEncryptedData == NULL) {
707 			*pulEncryptedDataLen = out_len;
708 			return (CKR_OK);
709 		}
710 
711 		if (*pulEncryptedDataLen < out_len) {
712 			*pulEncryptedDataLen = out_len;
713 			return (CKR_BUFFER_TOO_SMALL);
714 		}
715 	}
716 
717 	rc = aes_encrypt_contiguous_blocks(aes_ctx, (char *)pData, ulDataLen,
718 	    &out);
719 
720 	/*
721 	 * Since out.cd_offset is set to 0 initially and the underlying
722 	 * implementation increments out.cd_offset by the amount of output
723 	 * written, so we can just use the value as the amount written.
724 	 */
725 	*pulEncryptedDataLen = out.cd_offset;
726 
727 	if (rc != CRYPTO_SUCCESS) {
728 		rv = CKR_FUNCTION_FAILED;
729 		goto done;
730 	}
731 
732 	/*
733 	 * Since AES counter mode is a stream cipher, we call ctr_mode_final()
734 	 * to pick up any remaining bytes.  It is an internal function that
735 	 * does not destroy the context like *normal* final routines.
736 	 */
737 	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
738 		rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &out,
739 		    aes_encrypt_block);
740 	}
741 
742 done:
743 	if (rc != CRYPTO_SUCCESS && rv == CKR_OK) {
744 		rv = crypto2pkcs11_error_number(rc);
745 	}
746 
747 	return (rv);
748 }
749 
750 CK_RV
751 soft_aes_decrypt_update(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
752     CK_ULONG ulEncryptedDataLen, CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen)
753 {
754 	aes_ctx_t *aes_ctx = session_p->decrypt.context;
755 	crypto_data_t out = {
756 		.cd_format = CRYPTO_DATA_RAW,
757 		.cd_offset = 0,
758 		.cd_length = *pulDataLen,
759 		.cd_raw.iov_base = (char *)pData,
760 		.cd_raw.iov_len = *pulDataLen
761 	};
762 	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
763 	CK_RV rv = CKR_OK;
764 	size_t out_len = 0;
765 	int rc = CRYPTO_SUCCESS;
766 
767 	if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) == 0) {
768 		out_len = aes_ctx->ac_remainder_len + ulEncryptedDataLen;
769 
770 		if (mech == CKM_AES_CBC_PAD && out_len <= AES_BLOCK_LEN) {
771 			uint8_t *dest = (uint8_t *)aes_ctx->ac_remainder +
772 			    aes_ctx->ac_remainder_len;
773 
774 			bcopy(pEncryptedData, dest, ulEncryptedDataLen);
775 			aes_ctx->ac_remainder_len += ulEncryptedDataLen;
776 			return (CKR_OK);
777 		}
778 		out_len &= ~(AES_BLOCK_LEN - 1);
779 	}
780 
781 	if (pData == NULL) {
782 		*pulDataLen = out_len;
783 		return (CKR_OK);
784 	}
785 
786 	if (*pulDataLen < out_len) {
787 		*pulDataLen = out_len;
788 		return (CKR_BUFFER_TOO_SMALL);
789 	}
790 
791 	rc = aes_decrypt_contiguous_blocks(aes_ctx, (char *)pEncryptedData,
792 	    ulEncryptedDataLen, &out);
793 
794 	if (rc != CRYPTO_SUCCESS) {
795 		rv = CKR_FUNCTION_FAILED;
796 		goto done;
797 	}
798 
799 	*pulDataLen = out.cd_offset;
800 
801 	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
802 		rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &out,
803 		    aes_encrypt_block);
804 	}
805 
806 done:
807 	if (rc != CRYPTO_SUCCESS && rv == CKR_OK) {
808 		rv = crypto2pkcs11_error_number(rc);
809 	}
810 
811 	return (rv);
812 }
813 
814 CK_RV
815 soft_aes_encrypt_final(soft_session_t *session_p,
816     CK_BYTE_PTR pLastEncryptedPart, CK_ULONG_PTR pulLastEncryptedPartLen)
817 {
818 	aes_ctx_t *aes_ctx = session_p->encrypt.context;
819 	crypto_data_t data = {
820 		.cd_format = CRYPTO_DATA_RAW,
821 		.cd_offset = 0,
822 		.cd_length = *pulLastEncryptedPartLen,
823 		.cd_raw.iov_base = (char *)pLastEncryptedPart,
824 		.cd_raw.iov_len = *pulLastEncryptedPartLen
825 	};
826 	int rc = CRYPTO_SUCCESS;
827 	CK_RV rv = CKR_OK;
828 
829 	if (session_p->encrypt.mech.mechanism == CKM_AES_CBC_PAD) {
830 		char block[AES_BLOCK_LEN] = { 0 };
831 		size_t padlen = AES_BLOCK_LEN - aes_ctx->ac_remainder_len;
832 
833 		(void) memset(block, padlen & 0xff, sizeof (block));
834 		if (padlen > 0) {
835 			rc = aes_encrypt_contiguous_blocks(aes_ctx, block,
836 			    padlen, &data);
837 		}
838 	} else if (aes_ctx->ac_flags & CTR_MODE) {
839 		if (pLastEncryptedPart == NULL) {
840 			*pulLastEncryptedPartLen = aes_ctx->ac_remainder_len;
841 			return (CKR_OK);
842 		}
843 
844 		if (aes_ctx->ac_remainder_len > 0) {
845 			rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &data,
846 			    aes_encrypt_block);
847 			if (rc == CRYPTO_BUFFER_TOO_SMALL) {
848 				rv = CKR_BUFFER_TOO_SMALL;
849 			}
850 		}
851 	} else if (aes_ctx->ac_flags & CCM_MODE) {
852 		rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &data,
853 		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
854 	} else if (aes_ctx->ac_flags & GCM_MODE) {
855 		rc = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, &data,
856 		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
857 		    aes_xor_block);
858 	} else if (aes_ctx->ac_flags & CMAC_MODE) {
859 		if (pLastEncryptedPart == NULL) {
860 			*pulLastEncryptedPartLen = AES_BLOCK_LEN;
861 			return (CKR_OK);
862 		}
863 
864 		rc = cmac_mode_final((cbc_ctx_t *)aes_ctx, &data,
865 		    aes_encrypt_block, aes_xor_block);
866 	} else {
867 		/*
868 		 * There must be no unprocessed plaintext.
869 		 * This happens if the length of the last data is not a
870 		 * multiple of the AES block length.
871 		 */
872 		*pulLastEncryptedPartLen = 0;
873 		if (aes_ctx->ac_remainder_len > 0) {
874 			rv = CKR_DATA_LEN_RANGE;
875 		}
876 	}
877 
878 	if (rc != CRYPTO_SUCCESS && rv == CKR_OK) {
879 		rv = crypto2pkcs11_error_number(rc);
880 	}
881 
882 	soft_aes_free_ctx(aes_ctx);
883 	session_p->encrypt.context = NULL;
884 	return (rv);
885 }
886 
887 CK_RV
888 soft_aes_decrypt_final(soft_session_t *session_p, CK_BYTE_PTR pLastPart,
889     CK_ULONG_PTR pulLastPartLen)
890 {
891 	aes_ctx_t *aes_ctx = session_p->decrypt.context;
892 	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
893 	CK_RV rv = CKR_OK;
894 	int rc = CRYPTO_SUCCESS;
895 	crypto_data_t out = {
896 		.cd_format = CRYPTO_DATA_RAW,
897 		.cd_offset = 0,
898 		.cd_length = *pulLastPartLen,
899 		.cd_raw.iov_base = (char *)pLastPart,
900 		.cd_raw.iov_len = *pulLastPartLen
901 	};
902 
903 	if (aes_ctx->ac_remainder_len > 0) {
904 		switch (mech) {
905 		case CKM_AES_CBC_PAD:
906 			/*
907 			 * Since we cannot know the amount of padding present
908 			 * until after we decrypt the final block, and since
909 			 * we don't know which block is the last block until
910 			 * C_DecryptFinal() is called, we must always defer
911 			 * decrypting the most recent block of ciphertext
912 			 * until C_DecryptFinal() is called.  As a consequence,
913 			 * we should always have a remainder, and it should
914 			 * always be equal to AES_BLOCK_LEN.
915 			 */
916 			if (aes_ctx->ac_remainder_len != AES_BLOCK_LEN) {
917 				return (CKR_ENCRYPTED_DATA_LEN_RANGE);
918 			}
919 
920 			if (*pulLastPartLen < AES_BLOCK_LEN) {
921 				*pulLastPartLen = AES_BLOCK_LEN;
922 				return (CKR_BUFFER_TOO_SMALL);
923 			}
924 
925 			rc = aes_decrypt_contiguous_blocks(aes_ctx,
926 			    (char *)pLastPart, AES_BLOCK_LEN, &out);
927 
928 			if (rc != CRYPTO_SUCCESS) {
929 				break;
930 			}
931 
932 			rv = soft_remove_pkcs7_padding(pLastPart, AES_BLOCK_LEN,
933 			    pulLastPartLen);
934 			break;
935 		case CKM_AES_CTR:
936 			rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &out,
937 			    aes_encrypt_block);
938 			break;
939 		default:
940 			/* There must be no unprocessed ciphertext */
941 			return (CKR_ENCRYPTED_DATA_LEN_RANGE);
942 		}
943 	} else {
944 		/*
945 		 * We should never have no remainder for AES_CBC_PAD -- see
946 		 * above.
947 		 */
948 		ASSERT3U(mech, !=, CKM_AES_CBC_PAD);
949 	}
950 
951 	if (aes_ctx->ac_flags & CCM_MODE) {
952 		size_t pt_len = aes_ctx->ac_data_len;
953 
954 		if (*pulLastPartLen < pt_len) {
955 			*pulLastPartLen = pt_len;
956 			return (CKR_BUFFER_TOO_SMALL);
957 		}
958 
959 		ASSERT3U(aes_ctx->ac_processed_data_len, ==, pt_len);
960 		ASSERT3U(aes_ctx->ac_processed_mac_len, ==,
961 		    aes_ctx->ac_mac_len);
962 
963 		rc = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, &out,
964 		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
965 		    aes_xor_block);
966 
967 		if (rc != CRYPTO_SUCCESS) {
968 			*pulLastPartLen = out.cd_offset;
969 		}
970 	} else if (aes_ctx->ac_flags & GCM_MODE) {
971 		gcm_ctx_t *gcm_ctx = (gcm_ctx_t *)aes_ctx;
972 		size_t pt_len = gcm_ctx->gcm_processed_data_len -
973 		    gcm_ctx->gcm_tag_len;
974 
975 		if (*pulLastPartLen < pt_len) {
976 			*pulLastPartLen = pt_len;
977 			return (CKR_BUFFER_TOO_SMALL);
978 		}
979 
980 		rc = gcm_decrypt_final(gcm_ctx, &out, AES_BLOCK_LEN,
981 		    aes_encrypt_block, aes_xor_block);
982 
983 		if (rc != CRYPTO_SUCCESS) {
984 			*pulLastPartLen = out.cd_offset;
985 		}
986 	}
987 
988 	if (rv == CKR_OK && rc != CRYPTO_SUCCESS) {
989 		rv = crypto2pkcs11_error_number(rc);
990 	}
991 
992 	soft_aes_free_ctx(aes_ctx);
993 	session_p->decrypt.context = NULL;
994 
995 	return (rv);
996 }
997 
998 /*
999  * Allocate and initialize AES contexts for sign and verify operations
1000  * (including the underlying encryption context needed to sign or verify) --
1001  * called by C_SignInit() and C_VerifyInit() to perform the CKM_AES_* MAC
1002  * mechanisms. For general-length AES MAC, also validate the MAC length.
1003  */
1004 CK_RV
1005 soft_aes_sign_verify_init_common(soft_session_t *session_p,
1006     CK_MECHANISM_PTR pMechanism, soft_object_t *key_p, boolean_t sign_op)
1007 {
1008 	soft_aes_sign_ctx_t	*ctx = NULL;
1009 	/* For AES CMAC (the only AES MAC currently), iv is always 0 */
1010 	CK_BYTE		iv[AES_BLOCK_LEN] = { 0 };
1011 	CK_MECHANISM	encrypt_mech = {
1012 		.mechanism = CKM_AES_CMAC,
1013 		.pParameter = iv,
1014 		.ulParameterLen = sizeof (iv)
1015 	};
1016 	CK_RV		rv;
1017 	size_t		mac_len = AES_BLOCK_LEN;
1018 
1019 	if (key_p->key_type != CKK_AES)
1020 		return (CKR_KEY_TYPE_INCONSISTENT);
1021 
1022 	/* C_{Sign,Verify}Init() validate pMechanism != NULL */
1023 	if (pMechanism->mechanism == CKM_AES_CMAC_GENERAL) {
1024 		if (pMechanism->pParameter == NULL) {
1025 			return (CKR_MECHANISM_PARAM_INVALID);
1026 		}
1027 
1028 		mac_len = *(CK_MAC_GENERAL_PARAMS *)pMechanism->pParameter;
1029 
1030 		if (mac_len > AES_BLOCK_LEN) {
1031 			return (CKR_MECHANISM_PARAM_INVALID);
1032 		}
1033 	}
1034 
1035 	ctx = calloc(1, sizeof (*ctx));
1036 	if (ctx == NULL) {
1037 		return (CKR_HOST_MEMORY);
1038 	}
1039 
1040 	rv = soft_aes_check_mech_param(pMechanism, &ctx->aes_ctx);
1041 	if (rv != CKR_OK) {
1042 		soft_aes_free_ctx(ctx->aes_ctx);
1043 		goto done;
1044 	}
1045 
1046 	if ((rv = soft_encrypt_init_internal(session_p, &encrypt_mech,
1047 	    key_p)) != CKR_OK) {
1048 		soft_aes_free_ctx(ctx->aes_ctx);
1049 		goto done;
1050 	}
1051 
1052 	ctx->mac_len = mac_len;
1053 
1054 	(void) pthread_mutex_lock(&session_p->session_mutex);
1055 
1056 	if (sign_op) {
1057 		session_p->sign.context = ctx;
1058 		session_p->sign.mech.mechanism = pMechanism->mechanism;
1059 	} else {
1060 		session_p->verify.context = ctx;
1061 		session_p->verify.mech.mechanism = pMechanism->mechanism;
1062 	}
1063 
1064 	(void) pthread_mutex_unlock(&session_p->session_mutex);
1065 
1066 done:
1067 	if (rv != CKR_OK) {
1068 		soft_aes_free_ctx(ctx->aes_ctx);
1069 		free(ctx);
1070 	}
1071 
1072 	return (rv);
1073 }
1074 
1075 CK_RV
1076 soft_aes_sign_verify_common(soft_session_t *session_p, CK_BYTE_PTR pData,
1077     CK_ULONG ulDataLen, CK_BYTE_PTR pSigned, CK_ULONG_PTR pulSignedLen,
1078     boolean_t sign_op, boolean_t Final)
1079 {
1080 	soft_aes_sign_ctx_t	*soft_aes_ctx_sign_verify;
1081 	CK_RV			rv;
1082 	CK_BYTE			*pEncrypted = NULL;
1083 	CK_ULONG		ulEncryptedLen = AES_BLOCK_LEN;
1084 	CK_BYTE			last_block[AES_BLOCK_LEN];
1085 
1086 	if (sign_op) {
1087 		soft_aes_ctx_sign_verify =
1088 		    (soft_aes_sign_ctx_t *)session_p->sign.context;
1089 
1090 		if (soft_aes_ctx_sign_verify->mac_len == 0) {
1091 			*pulSignedLen = 0;
1092 			goto clean_exit;
1093 		}
1094 
1095 		/* Application asks for the length of the output buffer. */
1096 		if (pSigned == NULL) {
1097 			*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
1098 			return (CKR_OK);
1099 		}
1100 
1101 		/* Is the application-supplied buffer large enough? */
1102 		if (*pulSignedLen < soft_aes_ctx_sign_verify->mac_len) {
1103 			*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
1104 			return (CKR_BUFFER_TOO_SMALL);
1105 		}
1106 	} else {
1107 		soft_aes_ctx_sign_verify =
1108 		    (soft_aes_sign_ctx_t *)session_p->verify.context;
1109 	}
1110 
1111 	if (Final) {
1112 		rv = soft_encrypt_final(session_p, last_block,
1113 		    &ulEncryptedLen);
1114 	} else {
1115 		rv = soft_encrypt(session_p, pData, ulDataLen,
1116 		    last_block, &ulEncryptedLen);
1117 	}
1118 
1119 	if (rv == CKR_OK) {
1120 		*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
1121 
1122 		/* the leftmost mac_len bytes of last_block is our MAC */
1123 		(void) memcpy(pSigned, last_block, *pulSignedLen);
1124 	}
1125 
1126 clean_exit:
1127 
1128 	(void) pthread_mutex_lock(&session_p->session_mutex);
1129 
1130 	/* soft_encrypt_common() has freed the encrypt context */
1131 	if (sign_op) {
1132 		free(session_p->sign.context);
1133 		session_p->sign.context = NULL;
1134 	} else {
1135 		free(session_p->verify.context);
1136 		session_p->verify.context = NULL;
1137 	}
1138 	session_p->encrypt.flags = 0;
1139 
1140 	(void) pthread_mutex_unlock(&session_p->session_mutex);
1141 
1142 	if (pEncrypted) {
1143 		free(pEncrypted);
1144 	}
1145 
1146 	return (rv);
1147 }
1148 
1149 /*
1150  * Called by soft_sign_update()
1151  */
1152 CK_RV
1153 soft_aes_mac_sign_verify_update(soft_session_t *session_p, CK_BYTE_PTR pPart,
1154     CK_ULONG ulPartLen)
1155 {
1156 	CK_BYTE		buf[AES_BLOCK_LEN];
1157 	CK_ULONG	ulEncryptedLen = AES_BLOCK_LEN;
1158 	CK_RV		rv;
1159 
1160 	rv = soft_encrypt_update(session_p, pPart, ulPartLen,
1161 	    buf, &ulEncryptedLen);
1162 	explicit_bzero(buf, sizeof (buf));
1163 
1164 	return (rv);
1165 }
1166 
1167 void
1168 soft_aes_free_ctx(aes_ctx_t *ctx)
1169 {
1170 	size_t len = 0;
1171 
1172 	if (ctx == NULL)
1173 		return;
1174 
1175 	if (ctx->ac_flags & ECB_MODE) {
1176 		len = sizeof (ecb_ctx_t);
1177 	} else if (ctx->ac_flags & (CBC_MODE|CMAC_MODE)) {
1178 		len = sizeof (cbc_ctx_t);
1179 	} else if (ctx->ac_flags & CTR_MODE) {
1180 		len = sizeof (ctr_ctx_t);
1181 	} else if (ctx->ac_flags & CCM_MODE) {
1182 		len = sizeof (ccm_ctx_t);
1183 	} else if (ctx->ac_flags & GCM_MODE) {
1184 		len = sizeof (gcm_ctx_t);
1185 	}
1186 
1187 	freezero(ctx->ac_keysched, ctx->ac_keysched_len);
1188 	freezero(ctx, len);
1189 }
1190