/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2018, Joyent, Inc.
 */


#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
#include <security/cryptoki.h>
#endif	/* _KERNEL */

#include <sys/debug.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>

#ifdef __amd64

#ifdef _KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/x86_archext.h>	/* x86_featureset, X86FSET_*, CPUID_* */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable() */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()

#else
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */

extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif	/* __amd64 */

struct aes_block {
	uint64_t a;
	uint64_t b;
};


/*
 * gcm_mul()
 * Perform a carry-less multiplication (i.e., a multiplication in which
 * XOR replaces addition when accumulating partial products) of *x_in
 * and *y, and place the result in *res.
 *
 * Byte swap the input (*x_in and *y) and the output (*res).
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif	/* __amd64 */
	{
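		/*
		 * R is the GCM reduction constant: its top byte, 0xe1,
		 * encodes the field polynomial x^128 + x^7 + x^2 + x + 1
		 * in the bit-reflected representation that GCM uses
		 * (NIST SP 800-38D).
		 */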
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
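				/*
				 * Multiply v by x in GF(2^128): shift v
				 * right one bit and, if a bit fell off
				 * the low end, reduce by XORing the high
				 * word with R.
				 */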
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}


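/*
 * GHASH(c, d, t) computes one absorption step of the GHASH universal
 * hash function: t = (c->gcm_ghash XOR d) * H in GF(2^128).
 */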
#define	GHASH(c, d, t) \
	do { \
		xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
		gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
		    (uint64_t *)(void *)(t)); \
	} while (0)


/*
 * Encrypt multiple blocks of data in GCM mode.  Decryption is handled
 * separately, in gcm_mode_decrypt_contiguous_blocks().
 */
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

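	/*
	 * Process the input a block at a time, first completing any
	 * partial block carried over in gcm_remainder from a prior call.
	 */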
	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;

		if (out == NULL) {
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp;
	int i, rv;

	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}

	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
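	/* the tag is T = E(K, J0) XOR GHASH, truncated to gcm_tag_len */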
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}

/*
 * Decrypt the final block of input, which may be shorter than a full
 * block.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* zero-pad the partial ciphertext block before hashing it */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}

/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Accumulate the contiguous ciphertext blocks in the plaintext
	 * buffer.  Decryption is deferred to gcm_decrypt_final(), once
	 * the trailing authentication tag has been received.
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
#ifdef _KERNEL
		new = kmem_alloc(new_len, ctx->gcm_kmflag);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
#else
		new = malloc(new_len);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		free(ctx->gcm_pt_buf);
#endif
		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}

int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT3U(ctx->gcm_processed_data_len, ==, ctx->gcm_pt_buf_len);

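	/* gcm_pt_buf holds the ciphertext followed by the tag */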
	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
	while (remainder > 0) {
		/* Incomplete last block */
		if (remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * No more ciphertext is expected; just
			 * compute the plaintext for the remaining input.
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;
	}
out:
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}

static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 * NIST SP 800-38D permits tags of 128, 120, 112, 104, or 96 bits,
	 * and for certain applications 64 or 32 bits.
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}

static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
	if (iv_len == 12) {
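		/* 96-bit IV: J0 = IV || 0^31 || 1 (NIST SP 800-38D) */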
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

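		/* finish J0 = GHASH(IV): absorb 0^64 || [len(IV) in bits] */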
		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}

/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * Less than a full block of data remains;
			 * zero-pad the rest of the buffer.
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}

int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GCM_PARAMS *gcm_param;

	if (param != NULL) {
		gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

		if ((rv = gcm_validate_args(gcm_param)) != 0) {
			return (rv);
		}

		gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
		gcm_ctx->gcm_tag_len >>= 3;
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}
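
/*
 * Typical encrypt-side call sequence for this module (a sketch only;
 * the AES callbacks and the gcm_params setup below are illustrative
 * names supplied by the caller, not defined in this file):
 *
 *	gcm_ctx_t *ctx = gcm_alloc_ctx(KM_SLEEP);
 *	rv = gcm_init_ctx(ctx, (char *)&gcm_params, block_size,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 *	rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, len, out,
 *	    block_size, aes_encrypt_block, aes_copy_block, aes_xor_block);
 *	rv = gcm_encrypt_final(ctx, out, block_size,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 */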

int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GMAC_PARAMS *gmac_param;

	if (param != NULL) {
		gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

		gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}

void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}

void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}


#ifdef __amd64
/*
 * Return 1 if the CPU supports the PCLMULQDQ instruction, otherwise 0.
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax().  The kernel version uses
 * is_x86_feature().
 */
static int
intel_pclmulqdq_instruction_present(void)
{
	static int	cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
		cached_result =
		    is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ);
#else
		uint_t		ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
	}

	return (cached_result);
}
#endif	/* __amd64 */
