xref: /illumos-gate/usr/src/uts/common/crypto/io/crypto.c (revision c846684c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright 2018, Joyent, Inc.
25  */
26 
27 
28 /*
29  * The ioctl interface for cryptographic commands.
30  */
31 
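/*
 * Userland consumers (such as the pkcs11_kernel provider) open /dev/crypto
 * and issue these ioctls against the resulting descriptor. A rough sketch,
 * assuming the structure layouts and command names from sys/crypto/ioctl.h:
 *
 *	int fd = open("/dev/crypto", O_RDWR);
 *	crypto_get_provider_list_t pl;
 *
 *	pl.pl_count = 0;		(a zero count only queries the count)
 *	if (ioctl(fd, CRYPTO_GET_PROVIDER_LIST, &pl) == 0 &&
 *	    pl.pl_return_value == CRYPTO_SUCCESS)
 *		... pl.pl_count now holds the number of providers ...
 *
 * Each handler copies its argument in, performs the operation, and copies
 * the result back out with the embedded return-value field set.
 */
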
32 #include <sys/types.h>
33 #include <sys/modctl.h>
34 #include <sys/conf.h>
35 #include <sys/stat.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/kmem.h>
39 #include <sys/errno.h>
40 #include <sys/ksynch.h>
41 #include <sys/file.h>
42 #include <sys/open.h>
43 #include <sys/cred.h>
44 #include <sys/proc.h>
45 #include <sys/task.h>
46 #include <sys/mkdev.h>
47 #include <sys/model.h>
48 #include <sys/sysmacros.h>
49 #include <sys/crypto/common.h>
50 #include <sys/crypto/api.h>
51 #include <sys/crypto/impl.h>
52 #include <sys/crypto/sched_impl.h>
53 #include <sys/crypto/ioctl.h>
54 
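/*
 * Per-mechanism byte-count thresholds defined elsewhere in the kernel
 * crypto framework (KCF); they are reported to user space via the
 * function-list ioctl (see crypto_build_function_list() below).
 */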
55 extern int kcf_des3_threshold;
56 extern int kcf_aes_threshold;
57 extern int kcf_rc4_threshold;
58 extern int kcf_md5_threshold;
59 extern int kcf_sha1_threshold;
60 
61 /*
62  * Locking notes:
63  *
64  * crypto_locks protects the global array of minor structures.
 65  * crypto_locks is an array of locks indexed by the CPU sequence id. A
 66  * reader needs to hold only the lock for its CPU, while a writer must
 67  * hold all of the locks. krwlock_t is not an option here because the
 68  * hold time is very small for these locks.
69  *
70  * The fields in the minor structure are protected by the cm_lock member
71  * of the minor structure. The cm_cv is used to signal decrements
72  * in the cm_refcnt, and is used with the cm_lock.
73  *
74  * The locking order is crypto_locks followed by cm_lock.
75  */
76 
77 /*
78  * DDI entry points.
79  */
80 static int crypto_attach(dev_info_t *, ddi_attach_cmd_t);
81 static int crypto_detach(dev_info_t *, ddi_detach_cmd_t);
82 static int crypto_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
83 static int crypto_open(dev_t *, int, int, cred_t *);
84 static int crypto_close(dev_t, int, int, cred_t *);
85 static int crypto_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
86 
87 static int cipher_init(dev_t, caddr_t, int, int (*)(crypto_provider_t,
88     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
89     crypto_ctx_template_t, crypto_context_t *, crypto_call_req_t *));
90 
91 static int common_digest(dev_t, caddr_t, int, int (*)(crypto_context_t,
92     crypto_data_t *, crypto_data_t *, crypto_call_req_t *));
93 
94 static int cipher(dev_t, caddr_t, int, int (*)(crypto_context_t,
95     crypto_data_t *, crypto_data_t *, crypto_call_req_t *));
96 
97 static int cipher_update(dev_t, caddr_t, int, int (*)(crypto_context_t,
98     crypto_data_t *, crypto_data_t *, crypto_call_req_t *));
99 
100 static int common_final(dev_t, caddr_t, int, int (*)(crypto_context_t,
101     crypto_data_t *, crypto_call_req_t *));
102 
103 static int sign_verify_init(dev_t, caddr_t, int, int (*)(crypto_provider_t,
104     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
105     crypto_ctx_template_t, crypto_context_t *, crypto_call_req_t *));
106 
107 static int sign_verify_update(dev_t dev, caddr_t arg, int mode,
108     int (*)(crypto_context_t, crypto_data_t *, crypto_call_req_t *));
109 
110 static void crypto_release_provider_session(crypto_minor_t *,
111     crypto_provider_session_t *);
112 static int crypto_buffer_check(size_t);
113 static int crypto_free_find_ctx(crypto_session_data_t *);
114 static int crypto_get_provider_list(crypto_minor_t *, uint_t *,
115     crypto_provider_entry_t **, boolean_t);
116 
117 static int crypto_create_provider_session(crypto_minor_t *,
118     kcf_provider_desc_t *, crypto_session_id_t, crypto_provider_session_t **,
119     kcf_provider_desc_t *);
120 static int crypto_create_session_ptr(crypto_minor_t *, kcf_provider_desc_t *,
121     crypto_provider_session_t *, crypto_session_id_t *);
122 
123 /* number of minor numbers to allocate at a time */
124 #define	CRYPTO_MINOR_CHUNK	16
125 
126 /*
127  * There are two limits associated with kernel memory. The first,
128  * CRYPTO_MAX_BUFFER_LEN, is the maximum number of bytes that can be
129  * allocated for a single copyin/copyout buffer. The second limit is
130  * the total number of bytes that can be allocated by a process
131  * for copyin/copyout buffers. The latter is enforced by the
132  * project.max-crypto-memory resource control.
133  */
134 
135 #define	CRYPTO_MAX_BUFFER_LEN	(2 * 1024 * 1024)
136 #define	CRYPTO_MAX_FIND_COUNT	512
137 
138 /*
139  * We preapprove some bytes for each session to avoid making the costly
140  * crypto_buffer_check() calls. The preapproval is done when a new session
141  * is created and that cost is amortized over later crypto calls.
142  * Most applications create a session and then do a bunch of crypto calls
143  * in that session. So, they benefit from this optimization.
144  *
145  * Note that we may hit the project.max-crypto-memory limit a bit sooner
146  * because of this preapproval. But it is acceptable since the preapproved
147  * amount is insignificant compared to the default max-crypto-memory limit,
148  * which is a quarter of the machine's memory. The preapproved amount is
149  * roughly 2 * 16K (the maximum SSL record size).
150  */
151 #define	CRYPTO_PRE_APPROVED_LIMIT	(32 * 1024)
152 
153 /* The session table grows by CRYPTO_SESSION_CHUNK increments */
154 #define	CRYPTO_SESSION_CHUNK	100
155 
156 size_t crypto_max_buffer_len = CRYPTO_MAX_BUFFER_LEN;
157 size_t crypto_pre_approved_limit = CRYPTO_PRE_APPROVED_LIMIT;
158 
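/*
 * Initialize a crypto_data_t as a single raw (contiguous) buffer of
 * 'len' bytes. A zero length leaves the base pointer NULL.
 */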
159 #define	INIT_RAW_CRYPTO_DATA(data, len)				\
160 	(data).cd_format = CRYPTO_DATA_RAW;			\
161 	(data).cd_raw.iov_base = (len > 0) ? kmem_alloc(len, KM_SLEEP) : NULL; \
162 	(data).cd_raw.iov_len = len;				\
163 	(data).cd_offset = 0;					\
164 	(data).cd_length = len;
165 
166 static struct kmem_cache *crypto_session_cache;
167 static crypto_minor_t **crypto_minors = NULL;
168 static dev_info_t *crypto_dip = NULL;
169 static minor_t crypto_minor_chunk = CRYPTO_MINOR_CHUNK;
170 static minor_t crypto_minors_table_count = 0;
171 
172 /*
173  * Minor numbers start at 1 because vmem_alloc()
174  * returns 0 on failure.
175  */
176 static vmem_t *crypto_arena = NULL;	/* Arena for device minors */
177 static minor_t crypto_minors_count = 0;
178 static kcf_lock_withpad_t *crypto_locks;
179 
180 #define	CRYPTO_ENTER_ALL_LOCKS()		\
181 	for (i = 0; i < max_ncpus; i++)		\
182 		mutex_enter(&crypto_locks[i].kl_lock);
183 
184 #define	CRYPTO_EXIT_ALL_LOCKS()			\
185 	for (i = 0; i < max_ncpus; i++)		\
186 		mutex_exit(&crypto_locks[i].kl_lock);
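
/*
 * Usage sketch: a reader takes only the lock for its own CPU,
 *
 *	kcf_lock_withpad_t *mp = &crypto_locks[CPU_SEQID];
 *	mutex_enter(&mp->kl_lock);
 *	... read crypto_minors ...
 *	mutex_exit(&mp->kl_lock);
 *
 * while a writer (see crypto_open() and crypto_close()) must hold every
 * per-CPU lock:
 *
 *	CRYPTO_ENTER_ALL_LOCKS();
 *	... update crypto_minors ...
 *	CRYPTO_EXIT_ALL_LOCKS();
 */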
187 
188 #define	RETURN_LIST			B_TRUE
189 #define	DONT_RETURN_LIST		B_FALSE
190 
191 #define	CRYPTO_OPS_OFFSET(f)		offsetof(crypto_ops_t, co_##f)
192 #define	CRYPTO_RANDOM_OFFSET(f)		offsetof(crypto_random_number_ops_t, f)
193 #define	CRYPTO_SESSION_OFFSET(f)	offsetof(crypto_session_ops_t, f)
194 #define	CRYPTO_OBJECT_OFFSET(f)		offsetof(crypto_object_ops_t, f)
195 #define	CRYPTO_PROVIDER_OFFSET(f)	\
196 	offsetof(crypto_provider_management_ops_t, f)
197 
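/*
 * Cancel an outstanding crypto context and clear the caller's pointer to
 * it. CRYPTO_CANCEL_ALL_CTX() does this for every per-session context
 * when a session is torn down (see crypto_close()).
 */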
198 #define	CRYPTO_CANCEL_CTX(spp) {	\
199 	crypto_cancel_ctx(*(spp));	\
200 	*(spp) = NULL;			\
201 }
202 
203 #define	CRYPTO_CANCEL_ALL_CTX(sp) {				\
204 	if ((sp)->sd_digest_ctx != NULL) {			\
205 		crypto_cancel_ctx((sp)->sd_digest_ctx);		\
206 		(sp)->sd_digest_ctx = NULL;			\
207 	}							\
208 	if ((sp)->sd_encr_ctx != NULL) {			\
209 		crypto_cancel_ctx((sp)->sd_encr_ctx);		\
210 		(sp)->sd_encr_ctx = NULL;			\
211 	}							\
212 	if ((sp)->sd_decr_ctx != NULL) {			\
213 		crypto_cancel_ctx((sp)->sd_decr_ctx);		\
214 		(sp)->sd_decr_ctx = NULL;			\
215 	}							\
216 	if ((sp)->sd_sign_ctx != NULL) {			\
217 		crypto_cancel_ctx((sp)->sd_sign_ctx);		\
218 		(sp)->sd_sign_ctx = NULL;			\
219 	}							\
220 	if ((sp)->sd_verify_ctx != NULL) {			\
221 		crypto_cancel_ctx((sp)->sd_verify_ctx);		\
222 		(sp)->sd_verify_ctx = NULL;			\
223 	}							\
224 	if ((sp)->sd_sign_recover_ctx != NULL) {		\
225 		crypto_cancel_ctx((sp)->sd_sign_recover_ctx);	\
226 		(sp)->sd_sign_recover_ctx = NULL;		\
227 	}							\
228 	if ((sp)->sd_verify_recover_ctx != NULL) {		\
229 		crypto_cancel_ctx((sp)->sd_verify_recover_ctx);	\
230 		(sp)->sd_verify_recover_ctx = NULL;		\
231 	}							\
232 	if ((sp)->sd_mac_ctx != NULL) {		\
233 		crypto_cancel_ctx((sp)->sd_mac_ctx);	\
234 		(sp)->sd_mac_ctx = NULL;		\
235 	}							\
236 }
237 
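/*
 * Credit 'val' bytes back to the project's crypto-memory counter and to
 * the per-process counter; both were charged earlier when the
 * copyin/copyout buffer was approved.
 */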
238 #define	CRYPTO_DECREMENT_RCTL(val)	if ((val) != 0) {	\
239 	kproject_t *projp;					\
240 	mutex_enter(&curproc->p_lock);				\
241 	projp = curproc->p_task->tk_proj;			\
242 	ASSERT(projp != NULL);					\
243 	mutex_enter(&(projp->kpj_data.kpd_crypto_lock));	\
244 	projp->kpj_data.kpd_crypto_mem -= (val);		\
245 	mutex_exit(&(projp->kpj_data.kpd_crypto_lock));		\
246 	curproc->p_crypto_mem -= (val);				\
247 	mutex_exit(&curproc->p_lock);				\
248 }
249 
250 /*
251  * We do not need to hold sd_lock in the macros below
252  * as they are called after doing a get_session_ptr() which
253  * sets the CRYPTO_SESSION_IS_BUSY flag.
254  */
255 #define	CRYPTO_DECREMENT_RCTL_SESSION(sp, val, rctl_chk)	\
256 	if (((val) != 0) && ((sp) != NULL)) {			\
257 		ASSERT(((sp)->sd_flags & CRYPTO_SESSION_IS_BUSY) != 0);	\
258 		if (rctl_chk) {				\
259 			CRYPTO_DECREMENT_RCTL(val);		\
260 		} else {					\
261 			(sp)->sd_pre_approved_amount += (val);	\
262 		}						\
263 	}
264 
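/*
 * CRYPTO_BUFFER_CHECK() first tries to satisfy 'need' from the session's
 * pre-approved amount; on success rctl_chk is set to B_FALSE so that the
 * later CRYPTO_DECREMENT_RCTL_SESSION() credits the session rather than
 * the resource control. Otherwise rctl_chk is set to B_TRUE and the
 * request is checked against project.max-crypto-memory by
 * crypto_buffer_check().
 */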
265 #define	CRYPTO_BUFFER_CHECK(sp, need, rctl_chk)		\
266 	((sp->sd_pre_approved_amount >= need) ?			\
267 	(sp->sd_pre_approved_amount -= need,			\
268 	    rctl_chk = B_FALSE, CRYPTO_SUCCESS) :		\
269 	    (rctl_chk = B_TRUE, crypto_buffer_check(need)))
270 
271 /*
272  * Module linkage.
273  */
274 static struct cb_ops cbops = {
275 	crypto_open,		/* cb_open */
276 	crypto_close,		/* cb_close */
277 	nodev,			/* cb_strategy */
278 	nodev,			/* cb_print */
279 	nodev,			/* cb_dump */
280 	nodev,			/* cb_read */
281 	nodev,			/* cb_write */
282 	crypto_ioctl,		/* cb_ioctl */
283 	nodev,			/* cb_devmap */
284 	nodev,			/* cb_mmap */
285 	nodev,			/* cb_segmap */
286 	nochpoll,		/* cb_chpoll */
287 	ddi_prop_op,		/* cb_prop_op */
288 	NULL,			/* cb_streamtab */
289 	D_MP,			/* cb_flag */
290 	CB_REV,			/* cb_rev */
291 	nodev,			/* cb_aread */
292 	nodev,			/* cb_awrite */
293 };
294 
295 static struct dev_ops devops = {
296 	DEVO_REV,		/* devo_rev */
297 	0,			/* devo_refcnt */
298 	crypto_getinfo,		/* devo_getinfo */
299 	nulldev,		/* devo_identify */
300 	nulldev,		/* devo_probe */
301 	crypto_attach,		/* devo_attach */
302 	crypto_detach,		/* devo_detach */
303 	nodev,			/* devo_reset */
304 	&cbops,			/* devo_cb_ops */
305 	NULL,			/* devo_bus_ops */
306 	NULL,			/* devo_power */
307 	ddi_quiesce_not_needed,		/* devo_quiesce */
308 };
309 
310 static struct modldrv modldrv = {
311 	&mod_driverops,					/* drv_modops */
312 	"Cryptographic Library Interface",	/* drv_linkinfo */
313 	&devops,
314 };
315 
316 static struct modlinkage modlinkage = {
317 	MODREV_1,		/* ml_rev */
318 	&modldrv,		/* ml_linkage */
319 	NULL
320 };
321 
322 /*
323  * DDI entry points.
324  */
325 int
326 _init(void)
327 {
328 	return (mod_install(&modlinkage));
329 }
330 
331 int
332 _fini(void)
333 {
334 	return (mod_remove(&modlinkage));
335 }
336 
337 int
338 _info(struct modinfo *modinfop)
339 {
340 	return (mod_info(&modlinkage, modinfop));
341 }
342 
343 /* ARGSUSED */
344 static int
345 crypto_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
346 {
347 	switch (cmd) {
348 	case DDI_INFO_DEVT2DEVINFO:
349 		*result = crypto_dip;
350 		return (DDI_SUCCESS);
351 
352 	case DDI_INFO_DEVT2INSTANCE:
353 		*result = (void *)0;
354 		return (DDI_SUCCESS);
355 	}
356 	return (DDI_FAILURE);
357 }
358 
359 static int
360 crypto_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
361 {
362 	int i;
363 
364 	if (cmd != DDI_ATTACH) {
365 		return (DDI_FAILURE);
366 	}
367 
368 	if (ddi_get_instance(dip) != 0) {
369 		/* we only allow instance 0 to attach */
370 		return (DDI_FAILURE);
371 	}
372 
373 	crypto_session_cache = kmem_cache_create("crypto_session_cache",
374 	    sizeof (crypto_session_data_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
375 
376 	if (crypto_session_cache == NULL)
377 		return (DDI_FAILURE);
378 
379 	/* create the minor node */
380 	if (ddi_create_minor_node(dip, "crypto", S_IFCHR, 0,
381 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
382 		kmem_cache_destroy(crypto_session_cache);
383 		crypto_session_cache = NULL;
384 		cmn_err(CE_WARN, "crypto_attach: failed creating minor node");
385 		ddi_remove_minor_node(dip, NULL);
386 		return (DDI_FAILURE);
387 	}
388 
389 	crypto_locks = kmem_zalloc(max_ncpus * sizeof (kcf_lock_withpad_t),
390 	    KM_SLEEP);
391 	for (i = 0; i < max_ncpus; i++)
392 		mutex_init(&crypto_locks[i].kl_lock, NULL, MUTEX_DRIVER, NULL);
393 
394 	crypto_dip = dip;
395 
396 	/* allocate integer space for minor numbers */
397 	crypto_arena = vmem_create("crypto", (void *)1,
398 	    CRYPTO_MINOR_CHUNK, 1, NULL, NULL, NULL, 0,
399 	    VM_SLEEP | VMC_IDENTIFIER);
400 
401 	return (DDI_SUCCESS);
402 }
403 
404 static int
405 crypto_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
406 {
407 	minor_t i;
408 	kcf_lock_withpad_t *mp;
409 
410 	if (cmd != DDI_DETACH)
411 		return (DDI_FAILURE);
412 
413 	mp = &crypto_locks[CPU_SEQID];
414 	mutex_enter(&mp->kl_lock);
415 
416 	/* check if device is open */
417 	for (i = 0; i < crypto_minors_table_count; i++) {
418 		if (crypto_minors[i] != NULL) {
419 			mutex_exit(&mp->kl_lock);
420 			return (DDI_FAILURE);
421 		}
422 	}
423 	mutex_exit(&mp->kl_lock);
424 
425 	crypto_dip = NULL;
426 	ddi_remove_minor_node(dip, NULL);
427 
428 	kmem_cache_destroy(crypto_session_cache);
429 	crypto_session_cache = NULL;
430 
431 	kmem_free(crypto_minors,
432 	    sizeof (crypto_minor_t *) * crypto_minors_table_count);
433 	crypto_minors = NULL;
434 	crypto_minors_table_count = 0;
435 	for (i = 0; i < max_ncpus; i++)
436 		mutex_destroy(&crypto_locks[i].kl_lock);
437 	kmem_free(crypto_locks, max_ncpus * sizeof (kcf_lock_withpad_t));
438 	crypto_locks = NULL;
439 
440 	vmem_destroy(crypto_arena);
441 	crypto_arena = NULL;
442 
443 	return (DDI_SUCCESS);
444 }
445 
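/*
 * Allocate per-open state and a new minor number. The minors table and
 * the minor number arena grow in CRYPTO_MINOR_CHUNK increments as needed.
 */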
446 /* ARGSUSED3 */
447 static int
448 crypto_open(dev_t *devp, int flag, int otyp, cred_t *credp)
449 {
450 	crypto_minor_t *cm = NULL;
451 	minor_t mn;
452 	kcf_lock_withpad_t *mp;
453 	int i;
454 
455 	if (otyp != OTYP_CHR)
456 		return (ENXIO);
457 
458 	if (crypto_dip == NULL)
459 		return (ENXIO);
460 
461 	/* exclusive opens are not supported */
462 	if (flag & FEXCL)
463 		return (ENOTSUP);
464 
465 again:
466 	mp = &crypto_locks[CPU_SEQID];
467 	mutex_enter(&mp->kl_lock);
468 
469 	/* grow the minors table if needed */
470 	if (crypto_minors_count >= crypto_minors_table_count) {
471 		crypto_minor_t **newtable;
472 		minor_t chunk = crypto_minor_chunk;
473 		minor_t saved_count;
474 		size_t new_size;
475 		ulong_t big_count;
476 
477 		big_count = crypto_minors_count + chunk;
478 		if (big_count > MAXMIN) {
479 			mutex_exit(&mp->kl_lock);
480 			return (ENOMEM);
481 		}
482 
483 		saved_count = crypto_minors_table_count;
484 		new_size = sizeof (crypto_minor_t *) *
485 		    (crypto_minors_table_count + chunk);
486 
487 		mutex_exit(&mp->kl_lock);
488 
489 		newtable = kmem_zalloc(new_size, KM_SLEEP);
490 		CRYPTO_ENTER_ALL_LOCKS();
491 		/*
492 		 * Check if table grew while we were sleeping.
493 		 * The minors table never shrinks.
494 		 */
495 		if (crypto_minors_table_count > saved_count) {
496 			CRYPTO_EXIT_ALL_LOCKS();
497 			kmem_free(newtable, new_size);
498 			goto again;
499 		}
500 
501 		/* we rely on bcopy() being a no-op when the count is 0 */
502 		bcopy(crypto_minors, newtable,
503 		    sizeof (crypto_minor_t *) * crypto_minors_table_count);
504 
505 		kmem_free(crypto_minors,
506 		    sizeof (crypto_minor_t *) * crypto_minors_table_count);
507 
508 		/* grow the minors number space */
509 		if (crypto_minors_table_count != 0) {
510 			(void) vmem_add(crypto_arena,
511 			    (void *)(uintptr_t)(crypto_minors_table_count + 1),
512 			    crypto_minor_chunk, VM_SLEEP);
513 		}
514 
515 		crypto_minors = newtable;
516 		crypto_minors_table_count += chunk;
517 		CRYPTO_EXIT_ALL_LOCKS();
518 	} else {
519 		mutex_exit(&mp->kl_lock);
520 	}
521 
522 	/* allocate a new minor number starting with 1 */
523 	mn = (minor_t)(uintptr_t)vmem_alloc(crypto_arena, 1, VM_SLEEP);
524 
525 	cm = kmem_zalloc(sizeof (crypto_minor_t), KM_SLEEP);
526 	mutex_init(&cm->cm_lock, NULL, MUTEX_DRIVER, NULL);
527 	cv_init(&cm->cm_cv, NULL, CV_DRIVER, NULL);
528 
529 	CRYPTO_ENTER_ALL_LOCKS();
530 	cm->cm_refcnt = 1;
531 	crypto_minors[mn - 1] = cm;
532 	crypto_minors_count++;
533 	CRYPTO_EXIT_ALL_LOCKS();
534 
535 	*devp = makedevice(getmajor(*devp), mn);
536 
537 	return (0);
538 }
539 
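/*
 * Tear down the state associated with this open: wait for in-flight
 * ioctls to drain, cancel outstanding contexts, free all sessions, and
 * release the provider references.
 */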
540 /* ARGSUSED1 */
541 static int
542 crypto_close(dev_t dev, int flag, int otyp, cred_t *credp)
543 {
544 	crypto_minor_t *cm = NULL;
545 	crypto_session_data_t *sp;
546 	minor_t mn = getminor(dev);
547 	uint_t i;
548 	size_t total = 0;
549 	kcf_lock_withpad_t *mp;
550 
551 	mp = &crypto_locks[CPU_SEQID];
552 	mutex_enter(&mp->kl_lock);
553 
554 	if (mn > crypto_minors_table_count) {
555 		mutex_exit(&mp->kl_lock);
556 		cmn_err(CE_WARN, "crypto_close: bad minor (too big) %d", mn);
557 		return (ENODEV);
558 	}
559 
560 	cm = crypto_minors[mn - 1];
561 	if (cm == NULL) {
562 		mutex_exit(&mp->kl_lock);
563 		cmn_err(CE_WARN, "crypto_close: duplicate close of minor %d",
564 		    getminor(dev));
565 		return (ENODEV);
566 	}
567 
568 	mutex_exit(&mp->kl_lock);
569 
570 	CRYPTO_ENTER_ALL_LOCKS();
571 	/*
572 	 * We free the minor number, mn, from the crypto_arena
573 	 * only later. This ensures that we won't race with another
574 	 * thread in crypto_open with the same minor number.
575 	 */
576 	crypto_minors[mn - 1] = NULL;
577 	crypto_minors_count--;
578 	CRYPTO_EXIT_ALL_LOCKS();
579 
580 	mutex_enter(&cm->cm_lock);
581 	cm->cm_refcnt --;		/* decrement refcnt held in open */
582 	while (cm->cm_refcnt > 0) {
583 		cv_wait(&cm->cm_cv, &cm->cm_lock);
584 	}
585 
586 	vmem_free(crypto_arena, (void *)(uintptr_t)mn, 1);
587 
588 	/* free all session table entries starting with 1 */
589 	for (i = 1; i < cm->cm_session_table_count; i++) {
590 		if (cm->cm_session_table[i] == NULL)
591 			continue;
592 
593 		sp = cm->cm_session_table[i];
594 		ASSERT((sp->sd_flags & CRYPTO_SESSION_IS_BUSY) == 0);
595 		ASSERT(sp->sd_pre_approved_amount == 0 ||
596 		    sp->sd_pre_approved_amount == crypto_pre_approved_limit);
597 		total += sp->sd_pre_approved_amount;
598 		if (sp->sd_find_init_cookie != NULL) {
599 			(void) crypto_free_find_ctx(sp);
600 		}
601 		crypto_release_provider_session(cm, sp->sd_provider_session);
602 		KCF_PROV_REFRELE(sp->sd_provider);
603 		CRYPTO_CANCEL_ALL_CTX(sp);
604 		mutex_destroy(&sp->sd_lock);
605 		cv_destroy(&sp->sd_cv);
606 		kmem_cache_free(crypto_session_cache, sp);
607 		cm->cm_session_table[i] = NULL;
608 	}
609 
610 	/* free the session table */
611 	if (cm->cm_session_table != NULL && cm->cm_session_table_count > 0)
612 		kmem_free(cm->cm_session_table, cm->cm_session_table_count *
613 		    sizeof (void *));
614 
615 	total += (cm->cm_session_table_count * sizeof (void *));
616 	CRYPTO_DECREMENT_RCTL(total);
617 
618 	kcf_free_provider_tab(cm->cm_provider_count,
619 	    cm->cm_provider_array);
620 
621 	mutex_exit(&cm->cm_lock);
622 	mutex_destroy(&cm->cm_lock);
623 	cv_destroy(&cm->cm_cv);
624 	kmem_free(cm, sizeof (crypto_minor_t));
625 
626 	return (0);
627 }
628 
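/*
 * Hold a reference on the minor structure for the duration of an ioctl.
 * crypto_close() waits on cm_cv until all such references are released
 * before freeing the structure.
 */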
629 static crypto_minor_t *
630 crypto_hold_minor(minor_t minor)
631 {
632 	crypto_minor_t *cm;
633 	kcf_lock_withpad_t *mp;
634 
635 	if (minor > crypto_minors_table_count)
636 		return (NULL);
637 
638 	mp = &crypto_locks[CPU_SEQID];
639 	mutex_enter(&mp->kl_lock);
640 
641 	if ((cm = crypto_minors[minor - 1]) != NULL) {
642 		atomic_inc_32(&cm->cm_refcnt);
643 	}
644 	mutex_exit(&mp->kl_lock);
645 	return (cm);
646 }
647 
648 static void
649 crypto_release_minor(crypto_minor_t *cm)
650 {
651 	if (atomic_dec_32_nv(&cm->cm_refcnt) == 0) {
652 		cv_signal(&cm->cm_cv);
653 	}
654 }
655 
656 /*
657  * Build a list of functions and other information for the provider, pd.
658  */
659 static void
660 crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
661 {
662 	crypto_ops_t *ops;
663 	crypto_digest_ops_t *digest_ops;
664 	crypto_cipher_ops_t *cipher_ops;
665 	crypto_mac_ops_t *mac_ops;
666 	crypto_sign_ops_t *sign_ops;
667 	crypto_verify_ops_t *verify_ops;
668 	crypto_dual_ops_t *dual_ops;
669 	crypto_random_number_ops_t *random_number_ops;
670 	crypto_session_ops_t *session_ops;
671 	crypto_object_ops_t *object_ops;
672 	crypto_key_ops_t *key_ops;
673 	crypto_provider_management_ops_t *provider_ops;
674 
675 	if ((ops = pd->pd_ops_vector) == NULL)
676 		return;
677 
678 	if ((digest_ops = ops->co_digest_ops) != NULL) {
679 		if (digest_ops->digest_init != NULL)
680 			fl->fl_digest_init = B_TRUE;
681 		if (digest_ops->digest != NULL)
682 			fl->fl_digest = B_TRUE;
683 		if (digest_ops->digest_update != NULL)
684 			fl->fl_digest_update = B_TRUE;
685 		if (digest_ops->digest_key != NULL)
686 			fl->fl_digest_key = B_TRUE;
687 		if (digest_ops->digest_final != NULL)
688 			fl->fl_digest_final = B_TRUE;
689 	}
690 	if ((cipher_ops = ops->co_cipher_ops) != NULL) {
691 		if (cipher_ops->encrypt_init != NULL)
692 			fl->fl_encrypt_init = B_TRUE;
693 		if (cipher_ops->encrypt != NULL)
694 			fl->fl_encrypt = B_TRUE;
695 		if (cipher_ops->encrypt_update != NULL)
696 			fl->fl_encrypt_update = B_TRUE;
697 		if (cipher_ops->encrypt_final != NULL)
698 			fl->fl_encrypt_final = B_TRUE;
699 		if (cipher_ops->decrypt_init != NULL)
700 			fl->fl_decrypt_init = B_TRUE;
701 		if (cipher_ops->decrypt != NULL)
702 			fl->fl_decrypt = B_TRUE;
703 		if (cipher_ops->decrypt_update != NULL)
704 			fl->fl_decrypt_update = B_TRUE;
705 		if (cipher_ops->decrypt_final != NULL)
706 			fl->fl_decrypt_final = B_TRUE;
707 	}
708 	if ((mac_ops = ops->co_mac_ops) != NULL) {
709 		if (mac_ops->mac_init != NULL)
710 			fl->fl_mac_init = B_TRUE;
711 		if (mac_ops->mac != NULL)
712 			fl->fl_mac = B_TRUE;
713 		if (mac_ops->mac_update != NULL)
714 			fl->fl_mac_update = B_TRUE;
715 		if (mac_ops->mac_final != NULL)
716 			fl->fl_mac_final = B_TRUE;
717 	}
718 	if ((sign_ops = ops->co_sign_ops) != NULL) {
719 		if (sign_ops->sign_init != NULL)
720 			fl->fl_sign_init = B_TRUE;
721 		if (sign_ops->sign != NULL)
722 			fl->fl_sign = B_TRUE;
723 		if (sign_ops->sign_update != NULL)
724 			fl->fl_sign_update = B_TRUE;
725 		if (sign_ops->sign_final != NULL)
726 			fl->fl_sign_final = B_TRUE;
727 		if (sign_ops->sign_recover_init != NULL)
728 			fl->fl_sign_recover_init = B_TRUE;
729 		if (sign_ops->sign_recover != NULL)
730 			fl->fl_sign_recover = B_TRUE;
731 	}
732 	if ((verify_ops = ops->co_verify_ops) != NULL) {
733 		if (verify_ops->verify_init != NULL)
734 			fl->fl_verify_init = B_TRUE;
735 		if (verify_ops->verify != NULL)
736 			fl->fl_verify = B_TRUE;
737 		if (verify_ops->verify_update != NULL)
738 			fl->fl_verify_update = B_TRUE;
739 		if (verify_ops->verify_final != NULL)
740 			fl->fl_verify_final = B_TRUE;
741 		if (verify_ops->verify_recover_init != NULL)
742 			fl->fl_verify_recover_init = B_TRUE;
743 		if (verify_ops->verify_recover != NULL)
744 			fl->fl_verify_recover = B_TRUE;
745 	}
746 	if ((dual_ops = ops->co_dual_ops) != NULL) {
747 		if (dual_ops->digest_encrypt_update != NULL)
748 			fl->fl_digest_encrypt_update = B_TRUE;
749 		if (dual_ops->decrypt_digest_update != NULL)
750 			fl->fl_decrypt_digest_update = B_TRUE;
751 		if (dual_ops->sign_encrypt_update != NULL)
752 			fl->fl_sign_encrypt_update = B_TRUE;
753 		if (dual_ops->decrypt_verify_update != NULL)
754 			fl->fl_decrypt_verify_update = B_TRUE;
755 	}
756 	if ((random_number_ops = ops->co_random_ops) != NULL) {
757 		if (random_number_ops->seed_random != NULL)
758 			fl->fl_seed_random = B_TRUE;
759 		if (random_number_ops->generate_random != NULL)
760 			fl->fl_generate_random = B_TRUE;
761 	}
762 	if ((session_ops = ops->co_session_ops) != NULL) {
763 		if (session_ops->session_open != NULL)
764 			fl->fl_session_open = B_TRUE;
765 		if (session_ops->session_close != NULL)
766 			fl->fl_session_close = B_TRUE;
767 		if (session_ops->session_login != NULL)
768 			fl->fl_session_login = B_TRUE;
769 		if (session_ops->session_logout != NULL)
770 			fl->fl_session_logout = B_TRUE;
771 	}
772 	if ((object_ops = ops->co_object_ops) != NULL) {
773 		if (object_ops->object_create != NULL)
774 			fl->fl_object_create = B_TRUE;
775 		if (object_ops->object_copy != NULL)
776 			fl->fl_object_copy = B_TRUE;
777 		if (object_ops->object_destroy != NULL)
778 			fl->fl_object_destroy = B_TRUE;
779 		if (object_ops->object_get_size != NULL)
780 			fl->fl_object_get_size = B_TRUE;
781 		if (object_ops->object_get_attribute_value != NULL)
782 			fl->fl_object_get_attribute_value = B_TRUE;
783 		if (object_ops->object_set_attribute_value != NULL)
784 			fl->fl_object_set_attribute_value = B_TRUE;
785 		if (object_ops->object_find_init != NULL)
786 			fl->fl_object_find_init = B_TRUE;
787 		if (object_ops->object_find != NULL)
788 			fl->fl_object_find = B_TRUE;
789 		if (object_ops->object_find_final != NULL)
790 			fl->fl_object_find_final = B_TRUE;
791 	}
792 	if ((key_ops = ops->co_key_ops) != NULL) {
793 		if (key_ops->key_generate != NULL)
794 			fl->fl_key_generate = B_TRUE;
795 		if (key_ops->key_generate_pair != NULL)
796 			fl->fl_key_generate_pair = B_TRUE;
797 		if (key_ops->key_wrap != NULL)
798 			fl->fl_key_wrap = B_TRUE;
799 		if (key_ops->key_unwrap != NULL)
800 			fl->fl_key_unwrap = B_TRUE;
801 		if (key_ops->key_derive != NULL)
802 			fl->fl_key_derive = B_TRUE;
803 	}
804 	if ((provider_ops = ops->co_provider_ops) != NULL) {
805 		if (provider_ops->init_token != NULL)
806 			fl->fl_init_token = B_TRUE;
807 		if (provider_ops->init_pin != NULL)
808 			fl->fl_init_pin = B_TRUE;
809 		if (provider_ops->set_pin != NULL)
810 			fl->fl_set_pin = B_TRUE;
811 	}
812 
813 	fl->prov_is_hash_limited = pd->pd_flags & CRYPTO_HASH_NO_UPDATE;
814 	if (fl->prov_is_hash_limited) {
815 		fl->prov_hash_limit = min(pd->pd_hash_limit,
816 		    min(CRYPTO_MAX_BUFFER_LEN,
817 		    curproc->p_task->tk_proj->kpj_data.kpd_crypto_mem_ctl));
818 	}
819 
820 	fl->prov_is_hmac_limited = pd->pd_flags & CRYPTO_HMAC_NO_UPDATE;
821 	if (fl->prov_is_hmac_limited) {
822 		fl->prov_hmac_limit = min(pd->pd_hmac_limit,
823 		    min(CRYPTO_MAX_BUFFER_LEN,
824 		    curproc->p_task->tk_proj->kpj_data.kpd_crypto_mem_ctl));
825 	}
826 
827 	if (fl->prov_is_hash_limited || fl->prov_is_hmac_limited) {
828 		/*
829 		 * XXX - The threshold should ideally be per hash/HMAC
830 		 * mechanism. For now, we use the same value for all
831 		 * hash/HMAC mechanisms. Empirical evidence suggests this
832 		 * is fine.
833 		 */
834 		fl->prov_hash_threshold = kcf_md5_threshold;
835 	}
836 
837 	fl->total_threshold_count = MAX_NUM_THRESHOLD;
838 	fl->fl_threshold[0].mech_type = CKM_DES3_CBC;
839 	fl->fl_threshold[0].mech_threshold = kcf_des3_threshold;
840 	fl->fl_threshold[1].mech_type = CKM_DES3_ECB;
841 	fl->fl_threshold[1].mech_threshold = kcf_des3_threshold;
842 	fl->fl_threshold[2].mech_type = CKM_AES_CBC;
843 	fl->fl_threshold[2].mech_threshold = kcf_aes_threshold;
844 	fl->fl_threshold[3].mech_type = CKM_AES_ECB;
845 	fl->fl_threshold[3].mech_threshold = kcf_aes_threshold;
846 	fl->fl_threshold[4].mech_type = CKM_RC4;
847 	fl->fl_threshold[4].mech_threshold = kcf_rc4_threshold;
848 	fl->fl_threshold[5].mech_type = CKM_MD5;
849 	fl->fl_threshold[5].mech_threshold = kcf_md5_threshold;
850 	fl->fl_threshold[6].mech_type = CKM_SHA_1;
851 	fl->fl_threshold[6].mech_threshold = kcf_sha1_threshold;
852 }
853 
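/*
 * This ioctl returns the list of functions (entry points) supported by
 * the specified provider. For a logical provider, the lists of all its
 * members are merged.
 */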
854 /* ARGSUSED */
855 static int
856 get_function_list(dev_t dev, caddr_t arg, int mode, int *rval)
857 {
858 	crypto_get_function_list_t get_function_list;
859 	crypto_minor_t *cm;
860 	crypto_provider_id_t provider_id;
861 	crypto_function_list_t *fl;
862 	kcf_provider_desc_t *provider;
863 	int rv;
864 
865 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
866 		cmn_err(CE_WARN, "get_function_list: failed holding minor");
867 		return (ENXIO);
868 	}
869 
870 	if (copyin(arg, &get_function_list, sizeof (get_function_list)) != 0) {
871 		crypto_release_minor(cm);
872 		return (EFAULT);
873 	}
874 
875 	/* initialize provider_array */
876 	if (cm->cm_provider_array == NULL) {
877 		rv = crypto_get_provider_list(cm, NULL, NULL, DONT_RETURN_LIST);
878 		if (rv != CRYPTO_SUCCESS) {
879 			goto release_minor;
880 		}
881 	}
882 
883 	provider_id = get_function_list.fl_provider_id;
884 	mutex_enter(&cm->cm_lock);
885 	/* index must be less than count of providers */
886 	if (provider_id >= cm->cm_provider_count) {
887 		mutex_exit(&cm->cm_lock);
888 		rv = CRYPTO_ARGUMENTS_BAD;
889 		goto release_minor;
890 	}
891 
892 	ASSERT(cm->cm_provider_array != NULL);
893 	provider = cm->cm_provider_array[provider_id];
894 	mutex_exit(&cm->cm_lock);
895 
896 	fl = &get_function_list.fl_list;
897 	bzero(fl, sizeof (crypto_function_list_t));
898 
899 	if (provider->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
900 		crypto_build_function_list(fl, provider);
901 	} else {
902 		kcf_provider_desc_t *prev = NULL, *pd;
903 
904 		mutex_enter(&provider->pd_lock);
905 		while (kcf_get_next_logical_provider_member(provider,
906 		    prev, &pd)) {
907 			prev = pd;
908 			crypto_build_function_list(fl, pd);
909 			KCF_PROV_REFRELE(pd);
910 		}
911 		mutex_exit(&provider->pd_lock);
912 	}
913 
914 	rv = CRYPTO_SUCCESS;
915 
916 release_minor:
917 	crypto_release_minor(cm);
918 
919 	get_function_list.fl_return_value = rv;
920 
921 	if (copyout(&get_function_list, arg, sizeof (get_function_list)) != 0) {
922 		return (EFAULT);
923 	}
924 	return (0);
925 }
926 
927 /*
928  * This ioctl maps a PKCS#11 mechanism string into an internal number
929  * that is used by the kernel.  pn_internal_number is set to the
930  * internal number.
931  */
932 /* ARGSUSED */
933 static int
934 get_mechanism_number(dev_t dev, caddr_t arg, int mode, int *rval)
935 {
936 	STRUCT_DECL(crypto_get_mechanism_number, get_number);
937 	crypto_mech_type_t number;
938 	size_t len;
939 	char *mechanism_name;
940 	int rv;
941 
942 	STRUCT_INIT(get_number, mode);
943 
944 	if (copyin(arg, STRUCT_BUF(get_number), STRUCT_SIZE(get_number)) != 0)
945 		return (EFAULT);
946 
947 	len = STRUCT_FGET(get_number, pn_mechanism_len);
948 	if (len == 0 || len > CRYPTO_MAX_MECH_NAME) {
949 		rv = CRYPTO_ARGUMENTS_BAD;
950 		goto out;
951 	}
952 	mechanism_name = kmem_alloc(len, KM_SLEEP);
953 
954 	if (copyin(STRUCT_FGETP(get_number, pn_mechanism_string),
955 	    mechanism_name, len) != 0) {
956 		kmem_free(mechanism_name, len);
957 		return (EFAULT);
958 	}
959 
960 	/*
961 	 * Get mechanism number from kcf. We set the load_module
962 	 * flag to false since we use only hardware providers.
963 	 */
964 	number = crypto_mech2id_common(mechanism_name, B_FALSE);
965 	kmem_free(mechanism_name, len);
966 	if (number == CRYPTO_MECH_INVALID) {
967 		rv = CRYPTO_ARGUMENTS_BAD;
968 		goto out;
969 	}
970 
971 	bcopy((char *)&number, (char *)STRUCT_FADDR(get_number,
972 	    pn_internal_number), sizeof (number));
973 
974 	rv = CRYPTO_SUCCESS;
975 out:
976 	STRUCT_FSET(get_number, pn_return_value, rv);
977 
978 	if (copyout(STRUCT_BUF(get_number), arg,
979 	    STRUCT_SIZE(get_number)) != 0) {
980 		return (EFAULT);
981 	}
982 	return (0);
983 }
984 
985 /*
986  * This ioctl returns an array of crypto_mech_name_t entries.
987  * It lists all the PKCS#11 mechanisms available in the kernel.
988  */
989 /* ARGSUSED */
990 static int
991 get_mechanism_list(dev_t dev, caddr_t arg, int mode, int *rval)
992 {
993 	STRUCT_DECL(crypto_get_mechanism_list, get_list);
994 	crypto_mech_name_t *entries;
995 	size_t copyout_size;
996 	uint_t req_count;
997 	uint_t count;
998 	ulong_t offset;
999 	int error = 0;
1000 
1001 	STRUCT_INIT(get_list, mode);
1002 
1003 	if (copyin(arg, STRUCT_BUF(get_list), STRUCT_SIZE(get_list)) != 0) {
1004 		return (EFAULT);
1005 	}
1006 
1007 	entries = crypto_get_mech_list(&count, KM_SLEEP);
1008 
1009 	/* Number of entries caller thinks we have */
1010 	req_count = STRUCT_FGET(get_list, ml_count);
1011 
1012 	STRUCT_FSET(get_list, ml_count, count);
1013 	STRUCT_FSET(get_list, ml_return_value, CRYPTO_SUCCESS);
1014 
1015 	/* check if buffer is too small */
1016 	if (count > req_count) {
1017 		STRUCT_FSET(get_list, ml_return_value, CRYPTO_BUFFER_TOO_SMALL);
1018 	}
1019 
1020 	/* copyout the first stuff */
1021 	if (copyout(STRUCT_BUF(get_list), arg, STRUCT_SIZE(get_list)) != 0) {
1022 		error = EFAULT;
1023 	}
1024 
1025 	/*
1026 	 * If only requesting number of entries or buffer too small or an
1027 	 * error occurred, stop here
1028 	 */
1029 	if (req_count == 0 || count > req_count || error != 0) {
1030 		goto out;
1031 	}
1032 
1033 	copyout_size = count * sizeof (crypto_mech_name_t);
1034 
1035 	/* copyout entries */
1036 	offset = (ulong_t)STRUCT_FADDR(get_list, ml_list);
1037 	offset -= (ulong_t)STRUCT_BUF(get_list);
1038 	if (copyout(entries, arg + offset, copyout_size) != 0) {
1039 		error = EFAULT;
1040 	}
1041 
1042 out:
1043 	crypto_free_mech_list(entries, count);
1044 	return (error);
1045 }
1046 
1047 /*
1048  * Copyout kernel array of mech_infos to user space.
1049  */
1050 /* ARGSUSED */
1051 static int
1052 copyout_mechinfos(int mode, caddr_t out, uint_t count,
1053     crypto_mechanism_info_t *k_minfos, caddr_t u_minfos)
1054 {
1055 	STRUCT_DECL(crypto_mechanism_info, mi);
1056 	caddr_t p;
1057 	size_t len;
1058 	int i;
1059 
1060 	if (count == 0)
1061 		return (0);
1062 
1063 	STRUCT_INIT(mi, mode);
1064 
1065 	len = count * STRUCT_SIZE(mi);
1066 
1067 	ASSERT(u_minfos != NULL);
1068 	p = u_minfos;
1069 	for (i = 0; i < count; i++) {
1070 		STRUCT_FSET(mi, mi_min_key_size, k_minfos[i].mi_min_key_size);
1071 		STRUCT_FSET(mi, mi_max_key_size, k_minfos[i].mi_max_key_size);
1072 		STRUCT_FSET(mi, mi_keysize_unit, k_minfos[i].mi_keysize_unit);
1073 		STRUCT_FSET(mi, mi_usage, k_minfos[i].mi_usage);
1074 		bcopy(STRUCT_BUF(mi), p, STRUCT_SIZE(mi));
1075 		p += STRUCT_SIZE(mi);
1076 	}
1077 
1078 	if (copyout(u_minfos, out, len) != 0)
1079 		return (EFAULT);
1080 
1081 	return (0);
1082 }
1083 
1084 /*
1085  * This ioctl returns information for the specified mechanism.
1086  */
1087 /* ARGSUSED */
1088 static int
1089 get_all_mechanism_info(dev_t dev, caddr_t arg, int mode, int *rval)
1090 {
1091 	STRUCT_DECL(crypto_get_all_mechanism_info, get_all_mech);
1092 #ifdef _LP64
1093 	STRUCT_DECL(crypto_mechanism_info, mi);
1094 #else
1095 	/* LINTED E_FUNC_SET_NOT_USED */
1096 	STRUCT_DECL(crypto_mechanism_info, mi);
1097 #endif
1098 	crypto_mech_name_t mech_name;
1099 	crypto_mech_type_t mech_type;
1100 	crypto_mechanism_info_t *mech_infos = NULL;
1101 	uint_t num_mech_infos = 0;
1102 	uint_t req_count;
1103 	caddr_t u_minfos;
1104 	ulong_t offset;
1105 	int error = 0;
1106 	int rv;
1107 
1108 	req_count = 0;
1109 	STRUCT_INIT(get_all_mech, mode);
1110 	STRUCT_INIT(mi, mode);
1111 
1112 	if (copyin(arg, STRUCT_BUF(get_all_mech),
1113 	    STRUCT_SIZE(get_all_mech)) != 0) {
1114 		return (EFAULT);
1115 	}
1116 
1117 	(void) strncpy(mech_name, STRUCT_FGET(get_all_mech, mi_mechanism_name),
1118 	    CRYPTO_MAX_MECH_NAME);
1119 	mech_type = crypto_mech2id(mech_name);
1120 
1121 	if (mech_type == CRYPTO_MECH_INVALID) {
1122 		rv = CRYPTO_ARGUMENTS_BAD;
1123 		goto out1;
1124 	}
1125 
1126 	rv = crypto_get_all_mech_info(mech_type, &mech_infos, &num_mech_infos,
1127 	    KM_SLEEP);
1128 	if (rv != CRYPTO_SUCCESS) {
1129 		goto out1;
1130 	}
1131 	/* rv is CRYPTO_SUCCESS at this point */
1132 
1133 	/* Number of entries caller thinks we have */
1134 	req_count = STRUCT_FGET(get_all_mech, mi_count);
1135 
1136 	STRUCT_FSET(get_all_mech, mi_count, num_mech_infos);
1137 
1138 	/* check if buffer is too small */
1139 	if (num_mech_infos > req_count) {
1140 		rv = CRYPTO_BUFFER_TOO_SMALL;
1141 	}
1142 
1143 out1:
1144 	STRUCT_FSET(get_all_mech, mi_return_value, rv);
1145 
1146 	/* copy the first part */
1147 	if (copyout(STRUCT_BUF(get_all_mech), arg,
1148 	    STRUCT_SIZE(get_all_mech)) != 0) {
1149 		error = EFAULT;
1150 	}
1151 
1152 	/*
1153 	 * If only requesting number of entries, or there are no entries,
1154 	 * or rv is not CRYPTO_SUCCESS due to buffer too small or some other
1155 	 * crypto error, or an error occurred with copyout, stop here
1156 	 */
1157 	if (req_count == 0 || num_mech_infos == 0 || rv != CRYPTO_SUCCESS ||
1158 	    error != 0) {
1159 		goto out2;
1160 	}
1161 
1162 	/* copyout mech_infos */
1163 	offset = (ulong_t)STRUCT_FADDR(get_all_mech, mi_list);
1164 	offset -= (ulong_t)STRUCT_BUF(get_all_mech);
1165 
1166 	u_minfos = kmem_alloc(num_mech_infos * STRUCT_SIZE(mi), KM_SLEEP);
1167 	error = copyout_mechinfos(mode, arg + offset, num_mech_infos,
1168 	    mech_infos, u_minfos);
1169 	kmem_free(u_minfos, num_mech_infos * STRUCT_SIZE(mi));
1170 out2:
1171 	if (mech_infos != NULL)
1172 		crypto_free_all_mech_info(mech_infos, num_mech_infos);
1173 	return (error);
1174 }
1175 
1176 /*
1177  * Side-effects:
1178  *  1. This routine stores provider descriptor pointers in an array
1179  *     and increments each descriptor's reference count.  The array
1180  *     is stored in per-minor number storage.
1181  *  2. Destroys the old array and creates a new one every time
1182  *     this routine is called.
1183  */
1184 int
1185 crypto_get_provider_list(crypto_minor_t *cm, uint_t *count,
1186     crypto_provider_entry_t **array, boolean_t return_slot_list)
1187 {
1188 	kcf_provider_desc_t **provider_array;
1189 	crypto_provider_entry_t *p = NULL;
1190 	uint_t provider_count;
1191 	int rval;
1192 	int i;
1193 
1194 	/*
1195 	 * Take a snapshot of the provider table, returning only usable
1196 	 * hardware entries along with logical provider entries.
1197 	 */
1198 	rval =  kcf_get_slot_list(&provider_count, &provider_array, B_FALSE);
1199 	if (rval != CRYPTO_SUCCESS)
1200 		return (rval);
1201 
1202 	/* allocate memory before taking cm->cm_lock */
1203 	if (return_slot_list) {
1204 		if (provider_count != 0) {
1205 			p = kmem_alloc(provider_count *
1206 			    sizeof (crypto_provider_entry_t), KM_SLEEP);
1207 			for (i = 0; i < provider_count; i++) {
1208 				p[i].pe_provider_id = i;
1209 				p[i].pe_mechanism_count =
1210 				    provider_array[i]->pd_mech_list_count;
1211 			}
1212 		}
1213 		*array = p;
1214 		*count = provider_count;
1215 	}
1216 
1217 	/*
1218 	 * Free existing array of providers and replace with new list.
1219 	 */
1220 	mutex_enter(&cm->cm_lock);
1221 	if (cm->cm_provider_array != NULL) {
1222 		ASSERT(cm->cm_provider_count > 0);
1223 		kcf_free_provider_tab(cm->cm_provider_count,
1224 		    cm->cm_provider_array);
1225 	}
1226 
1227 	cm->cm_provider_array = provider_array;
1228 	cm->cm_provider_count = provider_count;
1229 	mutex_exit(&cm->cm_lock);
1230 
1231 	return (CRYPTO_SUCCESS);
1232 }
1233 
1234 /*
1235  * This ioctl returns an array of crypto_provider_entry_t entries.
1236  * This is how consumers learn which hardware providers are available.
1237  */
1238 /* ARGSUSED */
1239 static int
1240 get_provider_list(dev_t dev, caddr_t arg, int mode, int *rval)
1241 {
1242 	STRUCT_DECL(crypto_get_provider_list, get_list);
1243 	crypto_provider_entry_t *entries;
1244 	crypto_minor_t *cm;
1245 	size_t copyout_size;
1246 	uint_t req_count;
1247 	uint_t count;
1248 	ulong_t offset;
1249 	int rv;
1250 
1251 	STRUCT_INIT(get_list, mode);
1252 
1253 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
1254 		cmn_err(CE_WARN, "get_provider_list: failed holding minor");
1255 		return (ENXIO);
1256 	}
1257 
1258 	if (copyin(arg, STRUCT_BUF(get_list), STRUCT_SIZE(get_list)) != 0) {
1259 		crypto_release_minor(cm);
1260 		return (EFAULT);
1261 	}
1262 
1263 	rv = crypto_get_provider_list(cm, &count, &entries, RETURN_LIST);
1264 	if (rv != CRYPTO_SUCCESS) {
1265 		crypto_release_minor(cm);
1266 		STRUCT_FSET(get_list, pl_return_value, rv);
1267 		if (copyout(STRUCT_BUF(get_list), arg,
1268 		    STRUCT_SIZE(get_list)) != 0) {
1269 			return (EFAULT);
1270 		}
1271 		return (0);
1272 	}
1273 	crypto_release_minor(cm);
1274 
1275 	/* Number of slots caller thinks we have */
1276 	req_count = STRUCT_FGET(get_list, pl_count);
1277 
1278 	/* Check if only requesting number of slots */
1279 	if (req_count == 0) {
1280 
1281 		STRUCT_FSET(get_list, pl_count, count);
1282 		STRUCT_FSET(get_list, pl_return_value, CRYPTO_SUCCESS);
1283 
1284 		crypto_free_provider_list(entries, count);
1285 		if (copyout(STRUCT_BUF(get_list), arg,
1286 		    STRUCT_SIZE(get_list)) != 0) {
1287 			return (EFAULT);
1288 		}
1289 		return (0);
1290 	}
1291 
1292 	/* check if buffer is too small */
1293 	req_count = STRUCT_FGET(get_list, pl_count);
1294 	if (count > req_count) {
1295 		STRUCT_FSET(get_list, pl_count, count);
1296 		STRUCT_FSET(get_list, pl_return_value, CRYPTO_BUFFER_TOO_SMALL);
1297 		crypto_free_provider_list(entries, count);
1298 		if (copyout(STRUCT_BUF(get_list), arg,
1299 		    STRUCT_SIZE(get_list)) != 0) {
1300 			return (EFAULT);
1301 		}
1302 		return (0);
1303 	}
1304 
1305 	STRUCT_FSET(get_list, pl_count, count);
1306 	STRUCT_FSET(get_list, pl_return_value, CRYPTO_SUCCESS);
1307 
1308 	copyout_size = count * sizeof (crypto_provider_entry_t);
1309 
1310 	/* copyout the first stuff */
1311 	if (copyout(STRUCT_BUF(get_list), arg, STRUCT_SIZE(get_list)) != 0) {
1312 		crypto_free_provider_list(entries, count);
1313 		return (EFAULT);
1314 	}
1315 
1316 	if (count == 0) {
1317 		crypto_free_provider_list(entries, count);
1318 		return (0);
1319 	}
1320 
1321 	/* copyout entries */
1322 	offset = (ulong_t)STRUCT_FADDR(get_list, pl_list);
1323 	offset -= (ulong_t)STRUCT_BUF(get_list);
1324 	if (copyout(entries, arg + offset, copyout_size) != 0) {
1325 		crypto_free_provider_list(entries, count);
1326 		return (EFAULT);
1327 	}
1328 
1329 	crypto_free_provider_list(entries, count);
1330 	return (0);
1331 }
1332 
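/*
 * Convert the extended provider information into the crypto_provider_data
 * layout expected by the caller; the STRUCT_* macros handle both 32-bit
 * and 64-bit data models.
 */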
1333 static void
1334 ext_to_provider_data(int mode, kcf_provider_desc_t *provider,
1335     crypto_provider_ext_info_t *ei, void *out)
1336 {
1337 	STRUCT_DECL(crypto_provider_data, pd);
1338 	STRUCT_DECL(crypto_version, version);
1339 
1340 	STRUCT_INIT(pd, mode);
1341 	STRUCT_INIT(version, mode);
1342 
1343 	bcopy(provider->pd_description, STRUCT_FGET(pd, pd_prov_desc),
1344 	    CRYPTO_PROVIDER_DESCR_MAX_LEN);
1345 
1346 	bcopy(ei->ei_label, STRUCT_FGET(pd, pd_label), CRYPTO_EXT_SIZE_LABEL);
1347 	bcopy(ei->ei_manufacturerID, STRUCT_FGET(pd, pd_manufacturerID),
1348 	    CRYPTO_EXT_SIZE_MANUF);
1349 	bcopy(ei->ei_model, STRUCT_FGET(pd, pd_model), CRYPTO_EXT_SIZE_MODEL);
1350 	bcopy(ei->ei_serial_number, STRUCT_FGET(pd, pd_serial_number),
1351 	    CRYPTO_EXT_SIZE_SERIAL);
1352 	/*
1353 	 * We do not support ioctls for dual-function crypto operations yet.
1354 	 * So, we clear this flag as it might have been set by a provider.
1355 	 */
1356 	ei->ei_flags &= ~CRYPTO_EXTF_DUAL_CRYPTO_OPERATIONS;
1357 
1358 	STRUCT_FSET(pd, pd_flags, ei->ei_flags);
1359 	STRUCT_FSET(pd, pd_max_session_count, ei->ei_max_session_count);
1360 	STRUCT_FSET(pd, pd_session_count, (int)CRYPTO_UNAVAILABLE_INFO);
1361 	STRUCT_FSET(pd, pd_max_rw_session_count, ei->ei_max_session_count);
1362 	STRUCT_FSET(pd, pd_rw_session_count, (int)CRYPTO_UNAVAILABLE_INFO);
1363 	STRUCT_FSET(pd, pd_max_pin_len, ei->ei_max_pin_len);
1364 	STRUCT_FSET(pd, pd_min_pin_len, ei->ei_min_pin_len);
1365 	STRUCT_FSET(pd, pd_total_public_memory, ei->ei_total_public_memory);
1366 	STRUCT_FSET(pd, pd_free_public_memory, ei->ei_free_public_memory);
1367 	STRUCT_FSET(pd, pd_total_private_memory, ei->ei_total_private_memory);
1368 	STRUCT_FSET(pd, pd_free_private_memory, ei->ei_free_private_memory);
1369 	STRUCT_FSET(version, cv_major, ei->ei_hardware_version.cv_major);
1370 	STRUCT_FSET(version, cv_minor, ei->ei_hardware_version.cv_minor);
1371 	bcopy(STRUCT_BUF(version), STRUCT_FADDR(pd, pd_hardware_version),
1372 	    STRUCT_SIZE(version));
1373 	STRUCT_FSET(version, cv_major, ei->ei_firmware_version.cv_major);
1374 	STRUCT_FSET(version, cv_minor, ei->ei_firmware_version.cv_minor);
1375 	bcopy(STRUCT_BUF(version), STRUCT_FADDR(pd, pd_firmware_version),
1376 	    STRUCT_SIZE(version));
1377 	bcopy(ei->ei_time, STRUCT_FGET(pd, pd_time), CRYPTO_EXT_SIZE_TIME);
1378 	bcopy(STRUCT_BUF(pd), out, STRUCT_SIZE(pd));
1379 }
1380 
1381 /*
1382  * Utility routine to construct a crypto_provider_ext_info structure. Some
1383  * of the fields are constructed from information in the provider structure.
1384  * The rest of the fields have default values. We need to do this for
1385  * providers which do not support crypto_provider_management_ops routines.
1386  */
1387 static void
1388 fabricate_ext_info(kcf_provider_desc_t *provider,
1389     crypto_provider_ext_info_t *ei)
1390 {
1391 	/* empty label */
1392 	(void) memset(ei->ei_label, ' ', CRYPTO_EXT_SIZE_LABEL);
1393 
1394 	(void) memset(ei->ei_manufacturerID, ' ', CRYPTO_EXT_SIZE_MANUF);
1395 	(void) strncpy((char *)ei->ei_manufacturerID, "Unknown", 7);
1396 
1397 	(void) memset(ei->ei_model, ' ', CRYPTO_EXT_SIZE_MODEL);
1398 	(void) strncpy((char *)ei->ei_model, "Unknown", 7);
1399 
1400 	(void) memset(ei->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
1401 	(void) strncpy((char *)ei->ei_serial_number, "Unknown", 7);
1402 
1403 	if (KCF_PROV_RANDOM_OPS(provider) != NULL)
1404 		ei->ei_flags |= CRYPTO_EXTF_RNG;
1405 	if (KCF_PROV_DUAL_OPS(provider) != NULL)
1406 		ei->ei_flags |= CRYPTO_EXTF_DUAL_CRYPTO_OPERATIONS;
1407 
1408 	ei->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
1409 	ei->ei_max_pin_len = 0;
1410 	ei->ei_min_pin_len = 0;
1411 	ei->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
1412 	ei->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
1413 	ei->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
1414 	ei->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
1415 	ei->ei_hardware_version.cv_major = 1;
1416 	ei->ei_hardware_version.cv_minor = 0;
1417 	ei->ei_firmware_version.cv_major = 1;
1418 	ei->ei_firmware_version.cv_minor = 0;
1419 }
1420 
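/*
 * This ioctl returns extended information (label, manufacturer, model,
 * session and PIN limits, memory usage, and versions) for the specified
 * provider. If the provider does not implement the ext_info entry point,
 * a default set of values is fabricated.
 */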
1421 /* ARGSUSED */
1422 static int
1423 get_provider_info(dev_t dev, caddr_t arg, int mode, int *rval)
1424 {
1425 	STRUCT_DECL(crypto_get_provider_info, get_info);
1426 	crypto_minor_t *cm;
1427 	crypto_provider_id_t provider_id;
1428 	kcf_provider_desc_t *provider, *real_provider;
1429 	crypto_provider_ext_info_t *ext_info = NULL;
1430 	size_t need;
1431 	int error = 0;
1432 	int rv;
1433 	kcf_req_params_t params;
1434 
1435 	STRUCT_INIT(get_info, mode);
1436 
1437 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
1438 		cmn_err(CE_WARN, "get_provider_info: failed holding minor");
1439 		return (ENXIO);
1440 	}
1441 
1442 	if (copyin(arg, STRUCT_BUF(get_info), STRUCT_SIZE(get_info)) != 0) {
1443 		crypto_release_minor(cm);
1444 		return (EFAULT);
1445 	}
1446 
1447 	need = sizeof (crypto_provider_ext_info_t);
1448 	if ((rv = crypto_buffer_check(need)) != CRYPTO_SUCCESS) {
1449 		need = 0;
1450 		goto release_minor;
1451 	}
1452 
1453 	/* initialize provider_array */
1454 	if (cm->cm_provider_array == NULL) {
1455 		rv = crypto_get_provider_list(cm, NULL, NULL, DONT_RETURN_LIST);
1456 		if (rv != CRYPTO_SUCCESS) {
1457 			goto release_minor;
1458 		}
1459 	}
1460 
1461 	ext_info = kmem_zalloc(need, KM_SLEEP);
1462 
1463 	provider_id = STRUCT_FGET(get_info, gi_provider_id);
1464 	mutex_enter(&cm->cm_lock);
1465 	/* index must be less than count of providers */
1466 	if (provider_id >= cm->cm_provider_count) {
1467 		mutex_exit(&cm->cm_lock);
1468 		rv = CRYPTO_ARGUMENTS_BAD;
1469 		goto release_minor;
1470 	}
1471 
1472 	ASSERT(cm->cm_provider_array != NULL);
1473 	provider = cm->cm_provider_array[provider_id];
1474 	KCF_PROV_REFHOLD(provider);
1475 	mutex_exit(&cm->cm_lock);
1476 
1477 	(void) kcf_get_hardware_provider_nomech(
1478 	    CRYPTO_OPS_OFFSET(provider_ops), CRYPTO_PROVIDER_OFFSET(ext_info),
1479 	    provider, &real_provider);
1480 
1481 	if (real_provider != NULL) {
1482 		ASSERT(real_provider == provider ||
1483 		    provider->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
1484 		KCF_WRAP_PROVMGMT_OPS_PARAMS(&params, KCF_OP_MGMT_EXTINFO,
1485 		    0, NULL, 0, NULL, 0, NULL, ext_info, provider);
1486 		rv = kcf_submit_request(real_provider, NULL, NULL, &params,
1487 		    B_FALSE);
1488 		ASSERT(rv != CRYPTO_NOT_SUPPORTED);
1489 		KCF_PROV_REFRELE(real_provider);
1490 	} else {
1491 		/* do the best we can */
1492 		fabricate_ext_info(provider, ext_info);
1493 		rv = CRYPTO_SUCCESS;
1494 	}
1495 	KCF_PROV_REFRELE(provider);
1496 
1497 	if (rv == CRYPTO_SUCCESS) {
1498 		ext_to_provider_data(mode, provider, ext_info,
1499 		    STRUCT_FADDR(get_info, gi_provider_data));
1500 	}
1501 
1502 release_minor:
1503 	CRYPTO_DECREMENT_RCTL(need);
1504 	crypto_release_minor(cm);
1505 
1506 	if (ext_info != NULL)
1507 		kmem_free(ext_info, sizeof (crypto_provider_ext_info_t));
1508 
1509 	if (error != 0)
1510 		return (error);
1511 
1512 	STRUCT_FSET(get_info, gi_return_value, rv);
1513 	if (copyout(STRUCT_BUF(get_info), arg, STRUCT_SIZE(get_info)) != 0) {
1514 		return (EFAULT);
1515 	}
1516 	return (0);
1517 }
1518 
1519 /*
1520  * This ioctl returns an array of crypto_mech_name_t entries.
1521  * This is how consumers learn which mechanisms are permitted
1522  * by a provider.
1523  */
1524 /* ARGSUSED */
1525 static int
1526 get_provider_mechanisms(dev_t dev, caddr_t arg, int mode, int *rval)
1527 {
1528 	STRUCT_DECL(crypto_get_provider_mechanisms, get_mechanisms);
1529 	crypto_mech_name_t *entries;
1530 	crypto_minor_t *cm;
1531 	size_t copyout_size;
1532 	uint_t req_count;
1533 	uint_t count;
1534 	ulong_t offset;
1535 	int err;
1536 
1537 	STRUCT_INIT(get_mechanisms, mode);
1538 
1539 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
1540 		cmn_err(CE_WARN,
1541 		    "get_provider_mechanisms: failed holding minor");
1542 		return (ENXIO);
1543 	}
1544 
1545 	if (copyin(arg, STRUCT_BUF(get_mechanisms),
1546 	    STRUCT_SIZE(get_mechanisms)) != 0) {
1547 		crypto_release_minor(cm);
1548 		return (EFAULT);
1549 	}
1550 
1551 	/* get array of mechanisms from the core module */
1552 	if ((err = crypto_get_provider_mechanisms(cm,
1553 	    STRUCT_FGET(get_mechanisms, pm_provider_id),
1554 	    &count, &entries)) != 0) {
1555 		crypto_release_minor(cm);
1556 		STRUCT_FSET(get_mechanisms, pm_return_value, err);
1557 		if (copyout(STRUCT_BUF(get_mechanisms), arg,
1558 		    STRUCT_SIZE(get_mechanisms)) != 0) {
1559 			return (EFAULT);
1560 		}
1561 		return (0);
1562 	}
1563 	crypto_release_minor(cm);
1564 	/* Number of mechs caller thinks we have */
1565 	req_count = STRUCT_FGET(get_mechanisms, pm_count);
1566 
1567 	/* Check if caller is just requesting a count of mechanisms */
1568 	if (req_count == 0) {
1569 		STRUCT_FSET(get_mechanisms, pm_count, count);
1570 		STRUCT_FSET(get_mechanisms, pm_return_value, CRYPTO_SUCCESS);
1571 
1572 		crypto_free_mech_list(entries, count);
1573 		if (copyout(STRUCT_BUF(get_mechanisms), arg,
1574 		    STRUCT_SIZE(get_mechanisms)) != 0) {
1575 			return (EFAULT);
1576 		}
1577 		return (0);
1578 	}
1579 
1580 	/* check if buffer is too small */
1581 	if (count > req_count) {
1582 		STRUCT_FSET(get_mechanisms, pm_count, count);
1583 		STRUCT_FSET(get_mechanisms, pm_return_value,
1584 		    CRYPTO_BUFFER_TOO_SMALL);
1585 		crypto_free_mech_list(entries, count);
1586 		if (copyout(STRUCT_BUF(get_mechanisms), arg,
1587 		    STRUCT_SIZE(get_mechanisms)) != 0) {
1588 			return (EFAULT);
1589 		}
1590 		return (0);
1591 	}
1592 
1593 	STRUCT_FSET(get_mechanisms, pm_count, count);
1594 	STRUCT_FSET(get_mechanisms, pm_return_value, CRYPTO_SUCCESS);
1595 
1596 	copyout_size = count * sizeof (crypto_mech_name_t);
1597 
1598 	/* copyout the first stuff */
1599 	if (copyout(STRUCT_BUF(get_mechanisms), arg,
1600 	    STRUCT_SIZE(get_mechanisms)) != 0) {
1601 		crypto_free_mech_list(entries, count);
1602 		return (EFAULT);
1603 	}
1604 
1605 	if (count == 0) {
1606 		return (0);
1607 	}
1608 
1609 	/* copyout entries */
1610 	offset = (ulong_t)STRUCT_FADDR(get_mechanisms, pm_list);
1611 	offset -= (ulong_t)STRUCT_BUF(get_mechanisms);
1612 	if (copyout(entries, arg + offset, copyout_size) != 0) {
1613 		crypto_free_mech_list(entries, count);
1614 		return (EFAULT);
1615 	}
1616 
1617 	crypto_free_mech_list(entries, count);
1618 	return (0);
1619 }
1620 
1621 /*
1622  * This ioctl returns information about a provider's mechanism.
1623  */
1624 /* ARGSUSED */
1625 static int
1626 get_provider_mechanism_info(dev_t dev, caddr_t arg, int mode, int *rval)
1627 {
1628 	crypto_get_provider_mechanism_info_t mechanism_info;
1629 	crypto_minor_t *cm;
1630 	kcf_provider_desc_t *pd;
1631 	crypto_mech_info_t *mi = NULL;
1632 	int rv = CRYPTO_SUCCESS;
1633 	int i;
1634 
1635 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
1636 		cmn_err(CE_WARN,
1637 		    "get_provider_mechanism_info: failed holding minor");
1638 		return (ENXIO);
1639 	}
1640 
1641 	if (copyin(arg, &mechanism_info, sizeof (mechanism_info)) != 0) {
1642 		crypto_release_minor(cm);
1643 		return (EFAULT);
1644 	}
1645 
1646 	/* initialize provider table */
1647 	if (cm->cm_provider_array == NULL) {
1648 		rv = crypto_get_provider_list(cm, NULL, NULL, DONT_RETURN_LIST);
1649 		if (rv != CRYPTO_SUCCESS) {
1650 			mutex_enter(&cm->cm_lock);
1651 			goto fail;
1652 		}
1653 	}
1654 
1655 	/*
1656 	 * Provider ID must be less than the count of providers
1657 	 * obtained by calling get_provider_list().
1658 	 */
1659 	mutex_enter(&cm->cm_lock);
1660 	if (mechanism_info.mi_provider_id >= cm->cm_provider_count) {
1661 		rv = CRYPTO_ARGUMENTS_BAD;
1662 		goto fail;
1663 	}
1664 
1665 	pd = cm->cm_provider_array[mechanism_info.mi_provider_id];
1666 
1667 	/* First check if the provider supports the mechanism. */
1668 	for (i = 0; i < pd->pd_mech_list_count; i++) {
1669 		if (strncmp(pd->pd_mechanisms[i].cm_mech_name,
1670 		    mechanism_info.mi_mechanism_name,
1671 		    CRYPTO_MAX_MECH_NAME) == 0) {
1672 			mi = &pd->pd_mechanisms[i];
1673 			break;
1674 		}
1675 	}
1676 
1677 	if (mi == NULL) {
1678 		rv = CRYPTO_ARGUMENTS_BAD;
1679 		goto fail;
1680 	}
1681 
1682 	/* Now check if the mechanism is enabled for the provider. */
1683 	if (is_mech_disabled(pd, mechanism_info.mi_mechanism_name)) {
1684 		rv = CRYPTO_MECHANISM_INVALID;
1685 		goto fail;
1686 	}
1687 
1688 	mechanism_info.mi_min_key_size = mi->cm_min_key_length;
1689 	mechanism_info.mi_max_key_size = mi->cm_max_key_length;
1690 	mechanism_info.mi_flags = mi->cm_func_group_mask;
1691 
1692 fail:
1693 	mutex_exit(&cm->cm_lock);
1694 	crypto_release_minor(cm);
1695 	mechanism_info.mi_return_value = rv;
1696 	if (copyout(&mechanism_info, arg, sizeof (mechanism_info)) != 0) {
1697 		return (EFAULT);
1698 	}
1699 
1700 	return (0);
1701 }
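
/*
 * Illustrative userland use of the ioctl above.  This is only a sketch:
 * the CRYPTO_GET_PROVIDER_MECHANISM_INFO command constant and the 'fd'
 * and 'provider_id' variables are assumed here, not defined in this file.
 *
 *	crypto_get_provider_mechanism_info_t mi;
 *
 *	bzero(&mi, sizeof (mi));
 *	mi.mi_provider_id = provider_id;
 *	(void) strlcpy(mi.mi_mechanism_name, "CKM_AES_CBC",
 *	    CRYPTO_MAX_MECH_NAME);
 *	if (ioctl(fd, CRYPTO_GET_PROVIDER_MECHANISM_INFO, &mi) == -1)
 *		return (errno);
 *	if (mi.mi_return_value != CRYPTO_SUCCESS)
 *		return (EINVAL);
 *	 ... mi.mi_min_key_size and mi.mi_max_key_size now hold the
 *	     provider's key-length bounds for the mechanism ...
 */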
1702 
1703 /*
1704  * Every open of /dev/crypto multiplexes all PKCS#11 sessions across
1705  * a single session to each provider. Calls to open and close session
1706  * are not made to providers that do not support sessions. For these
1707  * providers, a session number of 0 is passed during subsequent operations,
1708  * and it is ignored by the provider.
1709  */
1710 static int
1711 crypto_get_provider_session(crypto_minor_t *cm,
1712     crypto_provider_id_t provider_index, crypto_provider_session_t **output_ps)
1713 {
1714 	kcf_provider_desc_t *pd, *real_provider;
1715 	kcf_req_params_t params;
1716 	crypto_provider_session_t *ps;
1717 	crypto_session_id_t provider_session_id = 0;
1718 	int rv;
1719 
1720 	ASSERT(MUTEX_HELD(&cm->cm_lock));
1721 
1722 	/* pd may be a logical provider */
1723 	pd = cm->cm_provider_array[provider_index];
1724 
1725 again:
1726 	/*
1727 	 * Check if there is already a session to the provider.
1728 	 * Sessions may be to a logical provider or a real provider.
1729 	 */
1730 	for (ps = cm->cm_provider_session; ps != NULL; ps = ps->ps_next) {
1731 		if (ps->ps_provider == pd)
1732 			break;
1733 	}
1734 
1735 	/* found existing session */
1736 	if (ps != NULL) {
1737 		ps->ps_refcnt++;
1738 		*output_ps = ps;
1739 		return (CRYPTO_SUCCESS);
1740 	}
1741 	mutex_exit(&cm->cm_lock);
1742 
1743 	/* find a hardware provider that supports session ops */
1744 	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
1745 	    CRYPTO_SESSION_OFFSET(session_open), pd, &real_provider);
1746 
1747 	if (real_provider != NULL) {
1748 		ASSERT(real_provider == pd ||
1749 		    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
1750 		/* open session to provider */
1751 		KCF_WRAP_SESSION_OPS_PARAMS(&params, KCF_OP_SESSION_OPEN,
1752 		    &provider_session_id, 0, CRYPTO_USER, NULL, 0, pd);
1753 		rv = kcf_submit_request(real_provider, NULL, NULL, &params,
1754 		    B_FALSE);
1755 		if (rv != CRYPTO_SUCCESS) {
1756 			mutex_enter(&cm->cm_lock);
1757 			KCF_PROV_REFRELE(real_provider);
1758 			return (rv);
1759 		}
1760 	}
1761 
1762 	/*
1763 	 * Check if someone opened a session to the provider
1764 	 * while we dropped the lock.
1765 	 */
1766 	mutex_enter(&cm->cm_lock);
1767 	for (ps = cm->cm_provider_session; ps != NULL; ps = ps->ps_next) {
1768 		if (ps->ps_provider == pd) {
1769 			mutex_exit(&cm->cm_lock);
1770 			if (real_provider != NULL) {
1771 				KCF_WRAP_SESSION_OPS_PARAMS(&params,
1772 				    KCF_OP_SESSION_CLOSE, NULL,
1773 				    provider_session_id, CRYPTO_USER, NULL, 0,
1774 				    pd);
1775 				(void) kcf_submit_request(real_provider, NULL,
1776 				    NULL, &params, B_FALSE);
1777 				KCF_PROV_REFRELE(real_provider);
1778 			}
1779 			mutex_enter(&cm->cm_lock);
1780 			goto again;
1781 
1782 		}
1783 	}
1784 
1785 	return (crypto_create_provider_session(cm, pd, provider_session_id,
1786 	    output_ps, real_provider));
1787 }
1788 
1789 static int
1790 crypto_create_provider_session(crypto_minor_t *cm, kcf_provider_desc_t *pd,
1791     crypto_session_id_t sid, crypto_provider_session_t **out_ps,
1792     kcf_provider_desc_t *real)
1793 {
1794 	crypto_provider_session_t *ps;
1795 
1796 	/* allocate crypto_provider_session structure */
1797 	ps = kmem_zalloc(sizeof (crypto_provider_session_t), KM_SLEEP);
1798 
1799 	/* increment refcnt and attach to crypto_minor structure */
1800 	ps->ps_session = sid;
1801 	ps->ps_refcnt = 1;
1802 	KCF_PROV_REFHOLD(pd);
1803 	ps->ps_provider = pd;
1804 	if (real != NULL) {
1805 		ps->ps_real_provider = real;
1806 	}
1807 	ps->ps_next = cm->cm_provider_session;
1808 	cm->cm_provider_session = ps;
1809 
1810 	*out_ps = ps;
1811 	return (CRYPTO_SUCCESS);
1812 }
1813 
1814 /*
1815  * Release a provider session.
1816  * If the reference count goes to zero, then close the session
1817  * to the provider.
1818  */
1819 static void
1820 crypto_release_provider_session(crypto_minor_t *cm,
1821     crypto_provider_session_t *provider_session)
1822 {
1823 	kcf_req_params_t params;
1824 	crypto_provider_session_t *ps = NULL, **prev;
1825 
1826 	ASSERT(MUTEX_HELD(&cm->cm_lock));
1827 
1828 	/* verify that provider_session is valid */
1829 	for (ps = cm->cm_provider_session, prev = &cm->cm_provider_session;
1830 	    ps != NULL; prev = &ps->ps_next, ps = ps->ps_next) {
1831 		if (ps == provider_session) {
1832 			break;
1833 		}
1834 	}
1835 
1836 	if (ps == NULL)
1837 		return;
1838 
1839 	ps->ps_refcnt--;
1840 
1841 	if (ps->ps_refcnt > 0)
1842 		return;
1843 
1844 	if (ps->ps_real_provider != NULL) {
1845 		/* close session with provider */
1846 		KCF_WRAP_SESSION_OPS_PARAMS(&params, KCF_OP_SESSION_CLOSE, NULL,
1847 		    ps->ps_session, CRYPTO_USER, NULL, 0, ps->ps_provider);
1848 		(void) kcf_submit_request(ps->ps_real_provider,
1849 		    NULL, NULL, &params, B_FALSE);
1850 		KCF_PROV_REFRELE(ps->ps_real_provider);
1851 	}
1852 	KCF_PROV_REFRELE(ps->ps_provider);
1853 	*prev = ps->ps_next;
1854 	kmem_free(ps, sizeof (*ps));
1855 }
1856 
1857 static int
1858 grow_session_table(crypto_minor_t *cm)
1859 {
1860 	crypto_session_data_t **session_table;
1861 	crypto_session_data_t **new;
1862 	uint_t session_table_count;
1863 	uint_t need;
1864 	size_t current_allocation;
1865 	size_t new_allocation;
1866 	int rv;
1867 
1868 	ASSERT(MUTEX_HELD(&cm->cm_lock));
1869 
1870 	session_table_count = cm->cm_session_table_count;
1871 	session_table = cm->cm_session_table;
1872 	need = session_table_count + CRYPTO_SESSION_CHUNK;
1873 
1874 	current_allocation = session_table_count * sizeof (void *);
1875 	new_allocation = need * sizeof (void *);
1876 
1877 	/*
1878 	 * Memory needed to grow the session table is checked
1879 	 * against the project.max-crypto-memory resource control.
1880 	 */
1881 	if ((rv = crypto_buffer_check(new_allocation - current_allocation)) !=
1882 	    CRYPTO_SUCCESS) {
1883 		return (rv);
1884 	}
1885 
1886 	/* drop lock while we allocate memory */
1887 	mutex_exit(&cm->cm_lock);
1888 	new = kmem_zalloc(new_allocation, KM_SLEEP);
1889 	mutex_enter(&cm->cm_lock);
1890 
1891 	/* check if another thread increased the table size */
1892 	if (session_table_count != cm->cm_session_table_count) {
1893 		kmem_free(new, new_allocation);
1894 		return (CRYPTO_SUCCESS);
1895 	}
1896 
1897 	bcopy(session_table, new, current_allocation);
1898 	kmem_free(session_table, current_allocation);
1899 	cm->cm_session_table = new;
1900 	cm->cm_session_table_count += CRYPTO_SESSION_CHUNK;
1901 
1902 	return (CRYPTO_SUCCESS);
1903 }
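
/*
 * The function above uses the standard "drop the lock around a blocking
 * allocation, then revalidate" idiom.  In outline (a sketch with generic
 * names, not code from this file):
 *
 *	mutex_exit(&lock);
 *	new = kmem_zalloc(new_size, KM_SLEEP);	(may block for memory)
 *	mutex_enter(&lock);
 *	if (someone else already grew the table)
 *		kmem_free(new, new_size);	(lost the race; keep theirs)
 *	else
 *		install 'new', copy the old entries, free the old table
 *
 * crypto_get_provider_session() above uses the same idiom around its
 * blocking session-open request, closing the redundant provider session
 * if it loses the race.
 */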
1904 
1905 /*
1906  * Find unused entry in session table and return its index.
1907  * Initialize session table entry.
1908  */
1909 /* ARGSUSED */
1910 static int
1911 crypto_open_session(dev_t dev, uint_t flags, crypto_session_id_t *session_index,
1912     crypto_provider_id_t provider_id)
1913 {
1914 	crypto_minor_t *cm;
1915 	int rv;
1916 	crypto_provider_session_t *ps;
1917 	kcf_provider_desc_t *provider;
1918 
1919 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
1920 		cmn_err(CE_WARN, "crypto_open_session: failed holding minor");
1921 		return (CRYPTO_FAILED);
1922 	}
1923 
1924 	/* initialize provider_array */
1925 	if (cm->cm_provider_array == NULL) {
1926 		rv = crypto_get_provider_list(cm, NULL, NULL, DONT_RETURN_LIST);
1927 		if (rv != 0) {
1928 			crypto_release_minor(cm);
1929 			return (rv);
1930 		}
1931 	}
1932 
1933 	mutex_enter(&cm->cm_lock);
1934 	/* index must be less than count of providers */
1935 	if (provider_id >= cm->cm_provider_count) {
1936 		mutex_exit(&cm->cm_lock);
1937 		crypto_release_minor(cm);
1938 		return (CRYPTO_INVALID_PROVIDER_ID);
1939 	}
1940 	ASSERT(cm->cm_provider_array != NULL);
1941 
1942 	rv = crypto_get_provider_session(cm, provider_id, &ps);
1943 	if (rv != CRYPTO_SUCCESS) {
1944 		mutex_exit(&cm->cm_lock);
1945 		crypto_release_minor(cm);
1946 		return (rv);
1947 	}
1948 	provider = cm->cm_provider_array[provider_id];
1949 
1950 	rv = crypto_create_session_ptr(cm, provider, ps, session_index);
1951 	mutex_exit(&cm->cm_lock);
1952 	crypto_release_minor(cm);
1953 	return (rv);
1954 
1955 }
1956 
1957 static int
1958 crypto_create_session_ptr(crypto_minor_t *cm, kcf_provider_desc_t *provider,
1959     crypto_provider_session_t *ps,  crypto_session_id_t *session_index)
1960 {
1961 	crypto_session_data_t **session_table;
1962 	crypto_session_data_t *sp;
1963 	uint_t session_table_count;
1964 	uint_t i;
1965 	int rv;
1966 
1967 	ASSERT(MUTEX_HELD(&cm->cm_lock));
1968 
1969 again:
1970 	session_table_count = cm->cm_session_table_count;
1971 	session_table = cm->cm_session_table;
1972 
1973 	/* session handles start with 1 */
1974 	for (i = 1; i < session_table_count; i++) {
1975 		if (session_table[i] == NULL)
1976 			break;
1977 	}
1978 
1979 	if (i == session_table_count || session_table_count == 0) {
1980 		if ((rv = grow_session_table(cm)) != CRYPTO_SUCCESS) {
1981 			crypto_release_provider_session(cm, ps);
1982 			return (rv);
1983 		}
1984 		goto again;
1985 	}
1986 
1987 	sp = kmem_cache_alloc(crypto_session_cache, KM_SLEEP);
1988 	sp->sd_flags = 0;
1989 	sp->sd_find_init_cookie = NULL;
1990 	sp->sd_digest_ctx = NULL;
1991 	sp->sd_encr_ctx = NULL;
1992 	sp->sd_decr_ctx = NULL;
1993 	sp->sd_sign_ctx = NULL;
1994 	sp->sd_verify_ctx = NULL;
1995 	sp->sd_mac_ctx = NULL;
1996 	sp->sd_sign_recover_ctx = NULL;
1997 	sp->sd_verify_recover_ctx = NULL;
1998 	mutex_init(&sp->sd_lock, NULL, MUTEX_DRIVER, NULL);
1999 	cv_init(&sp->sd_cv, NULL, CV_DRIVER, NULL);
2000 	KCF_PROV_REFHOLD(provider);
2001 	sp->sd_provider = provider;
2002 	sp->sd_provider_session = ps;
2003 
2004 	/* See the comment for CRYPTO_PRE_APPROVED_LIMIT. */
2005 	if ((rv = crypto_buffer_check(crypto_pre_approved_limit)) !=
2006 	    CRYPTO_SUCCESS) {
2007 		sp->sd_pre_approved_amount = 0;
2008 	} else {
2009 		sp->sd_pre_approved_amount = (int)crypto_pre_approved_limit;
2010 	}
2011 
2012 	cm->cm_session_table[i] = sp;
2013 	if (session_index != NULL)
2014 		*session_index = i;
2015 
2016 	return (CRYPTO_SUCCESS);
2017 }
2018 
2019 /*
2020  * Close a session.
2021  */
2022 static int
2023 crypto_close_session(dev_t dev, crypto_session_id_t session_index)
2024 {
2025 	crypto_session_data_t **session_table;
2026 	crypto_session_data_t *sp;
2027 	crypto_minor_t *cm;
2028 
2029 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
2030 		cmn_err(CE_WARN, "crypto_close_session: failed holding minor");
2031 		return (CRYPTO_FAILED);
2032 	}
2033 
2034 	mutex_enter(&cm->cm_lock);
2035 	session_table = cm->cm_session_table;
2036 
2037 	if ((session_index) == 0 ||
2038 	    (session_index >= cm->cm_session_table_count)) {
2039 		mutex_exit(&cm->cm_lock);
2040 		crypto_release_minor(cm);
2041 		return (CRYPTO_SESSION_HANDLE_INVALID);
2042 	}
2043 
2044 	sp = session_table[session_index];
2045 	if (sp == NULL) {
2046 		mutex_exit(&cm->cm_lock);
2047 		crypto_release_minor(cm);
2048 		return (CRYPTO_SESSION_HANDLE_INVALID);
2049 	}
2050 	/*
2051 	 * If session is in use, free it when the thread
2052 	 * finishes with the session.
2053 	 */
2054 	mutex_enter(&sp->sd_lock);
2055 	if (sp->sd_flags & CRYPTO_SESSION_IS_BUSY) {
2056 		sp->sd_flags |= CRYPTO_SESSION_IS_CLOSED;
2057 		mutex_exit(&sp->sd_lock);
2058 	} else {
2059 		ASSERT(sp->sd_pre_approved_amount == 0 ||
2060 		    sp->sd_pre_approved_amount == crypto_pre_approved_limit);
2061 		CRYPTO_DECREMENT_RCTL(sp->sd_pre_approved_amount);
2062 
2063 		if (sp->sd_find_init_cookie != NULL) {
2064 			(void) crypto_free_find_ctx(sp);
2065 		}
2066 
2067 		crypto_release_provider_session(cm, sp->sd_provider_session);
2068 		KCF_PROV_REFRELE(sp->sd_provider);
2069 		CRYPTO_CANCEL_ALL_CTX(sp);
2070 		mutex_destroy(&sp->sd_lock);
2071 		cv_destroy(&sp->sd_cv);
2072 		kmem_cache_free(crypto_session_cache, sp);
2073 		session_table[session_index] = NULL;
2074 	}
2075 
2076 	mutex_exit(&cm->cm_lock);
2077 	crypto_release_minor(cm);
2078 
2079 	return (CRYPTO_SUCCESS);
2080 }
2081 
2082 /*
2083  * This ioctl opens a session and returns the session ID in os_session.
2084  */
2085 /* ARGSUSED */
2086 static int
2087 open_session(dev_t dev, caddr_t arg, int mode, int *rval)
2088 {
2089 	crypto_open_session_t open_session;
2090 	crypto_session_id_t session;
2091 	int rv;
2092 
2093 	if (copyin(arg, &open_session, sizeof (open_session)) != 0)
2094 		return (EFAULT);
2095 
2096 	rv = crypto_open_session(dev, open_session.os_flags,
2097 	    &session, open_session.os_provider_id);
2098 	if (rv != CRYPTO_SUCCESS) {
2099 		open_session.os_return_value = rv;
2100 		if (copyout(&open_session, arg, sizeof (open_session)) != 0) {
2101 			return (EFAULT);
2102 		}
2103 		return (0);
2104 	}
2105 
2106 	open_session.os_session = session;
2107 	open_session.os_return_value = CRYPTO_SUCCESS;
2108 
2109 	if (copyout(&open_session, arg, sizeof (open_session)) != 0) {
2110 		return (EFAULT);
2111 	}
2112 	return (0);
2113 }
2114 
2115 /*
2116  * This ioctl closes a session.
2117  */
2118 /* ARGSUSED */
2119 static int
2120 close_session(dev_t dev, caddr_t arg, int mode, int *rval)
2121 {
2122 	crypto_close_session_t close_session;
2123 	int rv;
2124 
2125 	if (copyin(arg, &close_session, sizeof (close_session)) != 0)
2126 		return (EFAULT);
2127 
2128 	rv = crypto_close_session(dev, close_session.cs_session);
2129 	close_session.cs_return_value = rv;
2130 	if (copyout(&close_session, arg, sizeof (close_session)) != 0) {
2131 		return (EFAULT);
2132 	}
2133 	return (0);
2134 }
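
/*
 * Taken together, a userland consumer drives these two ioctls roughly as
 * follows.  Sketch only: the CRYPTO_OPEN_SESSION and CRYPTO_CLOSE_SESSION
 * command constants and the 'fd' and 'provider_id' variables are assumed.
 *
 *	crypto_open_session_t os;
 *	crypto_close_session_t cs;
 *
 *	bzero(&os, sizeof (os));
 *	os.os_provider_id = provider_id;
 *	if (ioctl(fd, CRYPTO_OPEN_SESSION, &os) == -1 ||
 *	    os.os_return_value != CRYPTO_SUCCESS)
 *		return (-1);
 *
 *	 ... use os.os_session with the per-session ioctls below ...
 *
 *	bzero(&cs, sizeof (cs));
 *	cs.cs_session = os.os_session;
 *	(void) ioctl(fd, CRYPTO_CLOSE_SESSION, &cs);
 */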
2135 
2136 /*
2137  * Copy data model dependent mechanism structure into a kernel mechanism
2138  * structure.  Allocate param storage if necessary.
2139  */
2140 static boolean_t
2141 copyin_mech(int mode, crypto_session_data_t *sp, crypto_mechanism_t *in_mech,
2142     crypto_mechanism_t *out_mech, size_t *out_rctl_bytes,
2143     boolean_t *out_rctl_chk, int *out_rv, int *out_error)
2144 {
2145 	STRUCT_DECL(crypto_mechanism, mech);
2146 	caddr_t param;
2147 	size_t param_len;
2148 	size_t rctl_bytes = 0;
2149 	int error = 0;
2150 	int rv = 0;
2151 
2152 	STRUCT_INIT(mech, mode);
2153 	bcopy(in_mech, STRUCT_BUF(mech), STRUCT_SIZE(mech));
2154 	param = STRUCT_FGETP(mech, cm_param);
2155 	param_len = STRUCT_FGET(mech, cm_param_len);
2156 	out_mech->cm_type = STRUCT_FGET(mech, cm_type);
2157 	out_mech->cm_param = NULL;
2158 	out_mech->cm_param_len = 0;
2159 	if (param != NULL && param_len != 0) {
2160 		if (param_len > crypto_max_buffer_len) {
2161 			cmn_err(CE_NOTE, "copyin_mech: buffer greater than "
2162 			    "%ld bytes, pid = %d", crypto_max_buffer_len,
2163 			    curproc->p_pid);
2164 			rv = CRYPTO_ARGUMENTS_BAD;
2165 			goto out;
2166 		}
2167 
2168 		rv = CRYPTO_BUFFER_CHECK(sp, param_len, *out_rctl_chk);
2169 		if (rv != CRYPTO_SUCCESS) {
2170 			goto out;
2171 		}
2172 		rctl_bytes = param_len;
2173 
2174 		out_mech->cm_param = kmem_alloc(param_len, KM_SLEEP);
2175 		if (copyin((char *)param, out_mech->cm_param, param_len) != 0) {
2176 			kmem_free(out_mech->cm_param, param_len);
2177 			out_mech->cm_param = NULL;
2178 			error = EFAULT;
2179 			goto out;
2180 		}
2181 		out_mech->cm_param_len = param_len;
2182 	}
2183 out:
2184 	*out_rctl_bytes = rctl_bytes;
2185 	*out_rv = rv;
2186 	*out_error = error;
2187 	return ((rv | error) ? B_FALSE : B_TRUE);
2188 }
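
/*
 * The STRUCT_*() macros used above (see <sys/model.h>) are what make the
 * copyin safe for both 32-bit and 64-bit callers: each field is read at
 * the width of the calling data model, selected from the ioctl 'mode'.
 * Minimal sketch of the idiom as it is used here:
 *
 *	STRUCT_DECL(crypto_mechanism, mech);	(handle for either layout)
 *	STRUCT_INIT(mech, mode);		(pick layout from 'mode')
 *	bcopy(in_mech, STRUCT_BUF(mech), STRUCT_SIZE(mech));
 *	param = STRUCT_FGETP(mech, cm_param);		(pointer-width aware)
 *	param_len = STRUCT_FGET(mech, cm_param_len);	(integer-width aware)
 */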
2189 
2190 /*
2191  * Free key attributes when key type is CRYPTO_KEY_ATTR_LIST.
2192  * The crypto_key structure is not freed.
2193  */
2194 static void
2195 crypto_free_key_attributes(crypto_key_t *key)
2196 {
2197 	crypto_object_attribute_t *attrs;
2198 	size_t len = 0;
2199 	int i;
2200 
2201 	ASSERT(key->ck_format == CRYPTO_KEY_ATTR_LIST);
2202 	if (key->ck_count == 0 || key->ck_attrs == NULL)
2203 		return;
2204 
2205 	/* compute the size of the container */
2206 	len = key->ck_count * sizeof (crypto_object_attribute_t);
2207 
2208 	/* total up the size of all attributes in the container */
2209 	for (i = 0; i < key->ck_count; i++) {
2210 		attrs = &key->ck_attrs[i];
2211 		if (attrs->oa_value_len != 0 &&
2212 		    attrs->oa_value != NULL) {
2213 			len += roundup(attrs->oa_value_len, sizeof (caddr_t));
2214 		}
2215 	}
2216 
2217 	bzero(key->ck_attrs, len);
2218 	kmem_free(key->ck_attrs, len);
2219 }
2220 
2221 /*
2222  * Frees allocated storage in the key structure, but doesn't free
2223  * the key structure.
2224  */
2225 static void
2226 free_crypto_key(crypto_key_t *key)
2227 {
2228 	switch (key->ck_format) {
2229 	case CRYPTO_KEY_RAW: {
2230 		size_t len;
2231 
2232 		if (key->ck_length == 0 || key->ck_data == NULL)
2233 			break;
2234 
2235 		len = CRYPTO_BITS2BYTES(key->ck_length);
2236 		bzero(key->ck_data, len);
2237 		kmem_free(key->ck_data, len);
2238 		break;
2239 	}
2240 
2241 	case CRYPTO_KEY_ATTR_LIST:
2242 		crypto_free_key_attributes(key);
2243 		break;
2244 
2245 	default:
2246 		break;
2247 	}
2248 }
2249 
2250 /*
2251  * Copy in an array of crypto_object_attribute structures from user-space.
2252  * Kernel memory is allocated for the array and the value of each attribute
2253  * in the array.  Since unprivileged users can specify the size of attributes,
2254  * the amount of memory needed is charged against the
2255  * project.max-crypto-memory resource control.
2256  *
2257  * Attribute values are copied in from user-space if copyin_value is set to
2258  * B_TRUE.  This routine returns B_TRUE if the copyin was successful.
2259  */
2260 static boolean_t
2261 copyin_attributes(int mode, crypto_session_data_t *sp,
2262     uint_t count, caddr_t oc_attributes,
2263     crypto_object_attribute_t **k_attrs_out, size_t *k_attrs_size_out,
2264     caddr_t *u_attrs_out, int *out_rv, int *out_error, size_t *out_rctl_bytes,
2265     boolean_t *out_rctl_chk, boolean_t copyin_value)
2266 {
2267 	STRUCT_DECL(crypto_object_attribute, oa);
2268 	crypto_object_attribute_t *k_attrs = NULL;
2269 	caddr_t attrs = NULL, ap, p, value;
2270 	caddr_t k_attrs_buf;
2271 	size_t k_attrs_len;
2272 	size_t k_attrs_buf_len = 0;
2273 	size_t k_attrs_total_len = 0;
2274 	size_t tmp_len;
2275 	size_t rctl_bytes = 0;
2276 	size_t len = 0;
2277 	size_t value_len;
2278 	int error = 0;
2279 	int rv = 0;
2280 	int i;
2281 
2282 	STRUCT_INIT(oa, mode);
2283 
2284 	if (count == 0) {
2285 		rv = CRYPTO_SUCCESS;
2286 		goto out;
2287 	}
2288 
2289 	if (count > CRYPTO_MAX_ATTRIBUTE_COUNT) {
2290 		rv = CRYPTO_ARGUMENTS_BAD;
2291 		goto out;
2292 	}
2293 
2294 	/* compute size of crypto_object_attribute array */
2295 	len = count * STRUCT_SIZE(oa);
2296 
2297 	/* this allocation is not charged against the user's resource limit */
2298 	attrs = kmem_alloc(len, KM_SLEEP);
2299 	if (copyin(oc_attributes, attrs, len) != 0) {
2300 		error = EFAULT;
2301 		goto out;
2302 	}
2303 
2304 	/* figure out how much memory to allocate for all of the attributes */
2305 	ap = attrs;
2306 	for (i = 0; i < count; i++) {
2307 		bcopy(ap, STRUCT_BUF(oa), STRUCT_SIZE(oa));
2308 		tmp_len = roundup(STRUCT_FGET(oa, oa_value_len),
2309 		    sizeof (caddr_t));
2310 		if (tmp_len > crypto_max_buffer_len) {
2311 			cmn_err(CE_NOTE, "copyin_attributes: buffer greater "
2312 			    "than %ld bytes, pid = %d", crypto_max_buffer_len,
2313 			    curproc->p_pid);
2314 			rv = CRYPTO_ARGUMENTS_BAD;
2315 			goto out;
2316 		}
2317 		if (STRUCT_FGETP(oa, oa_value) != NULL)
2318 			k_attrs_buf_len += tmp_len;
2319 		ap += STRUCT_SIZE(oa);
2320 	}
2321 
2322 	k_attrs_len = count * sizeof (crypto_object_attribute_t);
2323 	k_attrs_total_len = k_attrs_buf_len + k_attrs_len;
2324 
2325 	rv = CRYPTO_BUFFER_CHECK(sp, k_attrs_total_len, *out_rctl_chk);
2326 	if (rv != CRYPTO_SUCCESS) {
2327 		goto out;
2328 	}
2329 	rctl_bytes = k_attrs_total_len;
2330 
2331 	/* one big allocation for everything */
2332 	k_attrs = kmem_alloc(k_attrs_total_len, KM_SLEEP);
2333 	k_attrs_buf = (char *)k_attrs + k_attrs_len;
2334 
2335 	ap = attrs;
2336 	p = k_attrs_buf;
2337 	for (i = 0; i < count; i++) {
2338 		bcopy(ap, STRUCT_BUF(oa), STRUCT_SIZE(oa));
2339 		k_attrs[i].oa_type = STRUCT_FGET(oa, oa_type);
2340 		value = STRUCT_FGETP(oa, oa_value);
2341 		value_len = STRUCT_FGET(oa, oa_value_len);
2342 		if (value != NULL && value_len != 0 && copyin_value) {
2343 			if (copyin(value, p, value_len) != 0) {
2344 				kmem_free(k_attrs, k_attrs_total_len);
2345 				k_attrs = NULL;
2346 				error = EFAULT;
2347 				goto out;
2348 			}
2349 		}
2350 
2351 		if (value != NULL) {
2352 			k_attrs[i].oa_value = p;
2353 			p += roundup(value_len, sizeof (caddr_t));
2354 		} else {
2355 			k_attrs[i].oa_value = NULL;
2356 		}
2357 		k_attrs[i].oa_value_len = value_len;
2358 		ap += STRUCT_SIZE(oa);
2359 	}
2360 out:
2361 	if (attrs != NULL) {
2362 		/*
2363 		 * Free the array if there is a failure or the caller
2364 		 * doesn't want the array to be returned.
2365 		 */
2366 		if (error != 0 || rv != CRYPTO_SUCCESS || u_attrs_out == NULL) {
2367 			kmem_free(attrs, len);
2368 			attrs = NULL;
2369 		}
2370 	}
2371 
2372 	if (u_attrs_out != NULL)
2373 		*u_attrs_out = attrs;
2374 	if (k_attrs_size_out != NULL)
2375 		*k_attrs_size_out = k_attrs_total_len;
2376 	*k_attrs_out = k_attrs;
2377 	*out_rctl_bytes = rctl_bytes;
2378 	*out_rv = rv;
2379 	*out_error = error;
2380 	return ((rv | error) ? B_FALSE : B_TRUE);
2381 }
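
/*
 * Layout of the single k_attrs allocation made above for 'count'
 * attributes; each value is rounded up to pointer alignment:
 *
 *	+--------------------------------------+  <- k_attrs
 *	| crypto_object_attribute_t [0..count) |
 *	+--------------------------------------+  <- k_attrs_buf
 *	| value bytes for attribute 0          |
 *	| (padded to sizeof (caddr_t))         |
 *	| value bytes for attribute 1          |
 *	|   ...                                |
 *	+--------------------------------------+
 *
 * Each k_attrs[i].oa_value points into the second region, so a single
 * kmem_free() of k_attrs_total_len bytes releases everything.
 */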
2382 
2383 /*
2384  * Copy data model dependent raw key into a kernel key
2385  * structure.  Checks key length or attribute lengths against
2386  * resource controls before allocating memory.  Returns B_TRUE
2387  * if both error and rv are set to 0.
2388  */
2389 static boolean_t
2390 copyin_key(int mode, crypto_session_data_t *sp, crypto_key_t *in_key,
2391     crypto_key_t *out_key, size_t *out_rctl_bytes,
2392     boolean_t *out_rctl_chk, int *out_rv, int *out_error)
2393 {
2394 	STRUCT_DECL(crypto_key, key);
2395 	crypto_object_attribute_t *k_attrs = NULL;
2396 	size_t key_bits;
2397 	size_t key_bytes = 0;
2398 	size_t rctl_bytes = 0;
2399 	int count;
2400 	int error = 0;
2401 	int rv = CRYPTO_SUCCESS;
2402 
2403 	STRUCT_INIT(key, mode);
2404 	bcopy(in_key, STRUCT_BUF(key), STRUCT_SIZE(key));
2405 	out_key->ck_format = STRUCT_FGET(key, ck_format);
2406 	switch (out_key->ck_format) {
2407 	case CRYPTO_KEY_RAW:
2408 		key_bits = STRUCT_FGET(key, ck_length);
2409 		if (key_bits != 0) {
2410 			if (key_bits >
2411 			    (CRYPTO_BYTES2BITS(crypto_max_buffer_len))) {
2412 				cmn_err(CE_NOTE, "copyin_key: buffer greater "
2413 				    "than %ld bytes, pid = %d",
2414 				    crypto_max_buffer_len, curproc->p_pid);
2415 				rv = CRYPTO_ARGUMENTS_BAD;
2416 				goto out;
2417 			}
2418 			key_bytes = CRYPTO_BITS2BYTES(key_bits);
2419 
2420 			rv = CRYPTO_BUFFER_CHECK(sp, key_bytes,
2421 			    *out_rctl_chk);
2422 			if (rv != CRYPTO_SUCCESS) {
2423 				goto out;
2424 			}
2425 			rctl_bytes = key_bytes;
2426 
2427 			out_key->ck_data = kmem_alloc(key_bytes, KM_SLEEP);
2428 
2429 			if (copyin((char *)STRUCT_FGETP(key, ck_data),
2430 			    out_key->ck_data, key_bytes) != 0) {
2431 				kmem_free(out_key->ck_data, key_bytes);
2432 				out_key->ck_data = NULL;
2433 				out_key->ck_length = 0;
2434 				error = EFAULT;
2435 				goto out;
2436 			}
2437 		}
2438 		out_key->ck_length = (ulong_t)key_bits;
2439 		break;
2440 
2441 	case CRYPTO_KEY_ATTR_LIST:
2442 		count = STRUCT_FGET(key, ck_count);
2443 
2444 		if (copyin_attributes(mode, sp, count,
2445 		    (caddr_t)STRUCT_FGETP(key, ck_attrs), &k_attrs, NULL, NULL,
2446 		    &rv, &error, &rctl_bytes, out_rctl_chk, B_TRUE)) {
2447 			out_key->ck_count = count;
2448 			out_key->ck_attrs = k_attrs;
2449 			k_attrs = NULL;
2450 		} else {
2451 			out_key->ck_count = 0;
2452 			out_key->ck_attrs = NULL;
2453 		}
2454 		break;
2455 
2456 	case CRYPTO_KEY_REFERENCE:
2457 		out_key->ck_obj_id = STRUCT_FGET(key, ck_obj_id);
2458 		break;
2459 
2460 	default:
2461 		rv = CRYPTO_ARGUMENTS_BAD;
2462 	}
2463 
2464 out:
2465 	*out_rctl_bytes = rctl_bytes;
2466 	*out_rv = rv;
2467 	*out_error = error;
2468 	return ((rv | error) ? B_FALSE : B_TRUE);
2469 }
2470 
2471 /*
2472  * This routine does two things:
2473  * 1. Given a crypto_minor structure and a session ID, it returns
2474  *    a valid session pointer.
2475  * 2. It checks that the provider, to which the session has been opened,
2476  *    has not been removed.
2477  */
2478 static boolean_t
2479 get_session_ptr(crypto_session_id_t i, crypto_minor_t *cm,
2480     crypto_session_data_t **session_ptr, int *out_error, int *out_rv)
2481 {
2482 	crypto_session_data_t *sp = NULL;
2483 	int rv = CRYPTO_SESSION_HANDLE_INVALID;
2484 	int error = 0;
2485 
2486 	mutex_enter(&cm->cm_lock);
2487 	if ((i < cm->cm_session_table_count) &&
2488 	    (cm->cm_session_table[i] != NULL)) {
2489 		sp = cm->cm_session_table[i];
2490 		mutex_enter(&sp->sd_lock);
2491 		mutex_exit(&cm->cm_lock);
2492 		while (sp->sd_flags & CRYPTO_SESSION_IS_BUSY) {
2493 			if (cv_wait_sig(&sp->sd_cv, &sp->sd_lock) == 0) {
2494 				mutex_exit(&sp->sd_lock);
2495 				sp = NULL;
2496 				error = EINTR;
2497 				goto out;
2498 			}
2499 		}
2500 
2501 		if (sp->sd_flags & CRYPTO_SESSION_IS_CLOSED) {
2502 			mutex_exit(&sp->sd_lock);
2503 			sp = NULL;
2504 			goto out;
2505 		}
2506 
2507 		if (KCF_IS_PROV_REMOVED(sp->sd_provider)) {
2508 			mutex_exit(&sp->sd_lock);
2509 			sp = NULL;
2510 			rv = CRYPTO_DEVICE_ERROR;
2511 			goto out;
2512 		}
2513 
2514 		rv = CRYPTO_SUCCESS;
2515 		sp->sd_flags |= CRYPTO_SESSION_IS_BUSY;
2516 		mutex_exit(&sp->sd_lock);
2517 	} else {
2518 		mutex_exit(&cm->cm_lock);
2519 	}
2520 out:
2521 	*session_ptr = sp;
2522 	*out_error = error;
2523 	*out_rv = rv;
2524 	return ((rv == CRYPTO_SUCCESS && error == 0) ? B_TRUE : B_FALSE);
2525 }
2526 
2527 #define	CRYPTO_SESSION_RELE(s)	if ((s) != NULL) {	\
2528 	mutex_enter(&((s)->sd_lock));			\
2529 	(s)->sd_flags &= ~CRYPTO_SESSION_IS_BUSY;	\
2530 	cv_broadcast(&(s)->sd_cv);			\
2531 	mutex_exit(&((s)->sd_lock));			\
2532 }
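
/*
 * get_session_ptr() and CRYPTO_SESSION_RELE() bracket every per-session
 * operation below; each ioctl handler follows the same shape (sketch):
 *
 *	if (!get_session_ptr(session_id, cm, &sp, &error, &rv))
 *		goto out;		(sp now marked CRYPTO_SESSION_IS_BUSY)
 *	 ... perform the operation using sp ...
 * out:
 *	CRYPTO_SESSION_RELE(sp);	(clear the busy flag, wake waiters)
 *	crypto_release_minor(cm);
 */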
2533 
2534 /* ARGSUSED */
2535 static int
2536 encrypt_init(dev_t dev, caddr_t arg, int mode, int *rval)
2537 {
2538 	return (cipher_init(dev, arg, mode, crypto_encrypt_init_prov));
2539 }
2540 
2541 /* ARGSUSED */
2542 static int
2543 decrypt_init(dev_t dev, caddr_t arg, int mode, int *rval)
2544 {
2545 	return (cipher_init(dev, arg, mode, crypto_decrypt_init_prov));
2546 }
2547 
2548 /*
2549  * umech is a mechanism structure that has been copied from user address
2550  * space into kernel address space. Only one copyin has been done.
2551  * The mechanism parameter, if non-null, still points to user address space.
2552  * If the mechanism parameter contains pointers, they are pointers into
2553  * user address space.
2554  *
2555  * kmech is a umech with all pointers and structures in kernel address space.
2556  *
2557  * This routine calls the provider's entry point to copy a umech parameter
2558  * into kernel address space. Kernel memory is allocated by the provider.
2559  */
2560 static int
2561 crypto_provider_copyin_mech_param(kcf_provider_desc_t *pd,
2562     crypto_mechanism_t *umech, crypto_mechanism_t *kmech, int mode, int *error)
2563 {
2564 	crypto_mech_type_t provider_mech_type;
2565 	int rv;
2566 
2567 	/* get the provider's mech number */
2568 	provider_mech_type = KCF_TO_PROV_MECHNUM(pd, umech->cm_type);
2569 
2570 	kmech->cm_param = NULL;
2571 	kmech->cm_param_len = 0;
2572 	kmech->cm_type = provider_mech_type;
2573 	rv = KCF_PROV_COPYIN_MECH(pd, umech, kmech, error, mode);
2574 	kmech->cm_type = umech->cm_type;
2575 
2576 	return (rv);
2577 }
2578 
2579 /*
2580  * umech is a mechanism structure that has been copied from user address
2581  * space into kernel address space. Only one copyin has been done.
2582  * The mechanism parameter, if non-null, still points to user address space.
2583  * If the mechanism parameter contains pointers, they are pointers into
2584  * user address space.
2585  *
2586  * kmech is a umech with all pointers and structures in kernel address space.
2587  *
2588  * This routine calls the provider's entry point to copy a kmech parameter
2589  * into user address space using umech as a template containing
2590  * user address pointers.
2591  */
2592 static int
2593 crypto_provider_copyout_mech_param(kcf_provider_desc_t *pd,
2594     crypto_mechanism_t *kmech, crypto_mechanism_t *umech, int mode, int *error)
2595 {
2596 	crypto_mech_type_t provider_mech_type;
2597 	int rv;
2598 
2599 	/* get the provider's mech number */
2600 	provider_mech_type = KCF_TO_PROV_MECHNUM(pd, umech->cm_type);
2601 
2602 	kmech->cm_type = provider_mech_type;
2603 	rv = KCF_PROV_COPYOUT_MECH(pd, kmech, umech, error, mode);
2604 	kmech->cm_type = umech->cm_type;
2605 
2606 	return (rv);
2607 }
2608 
2609 /*
2610  * Call the provider's entry point to free kernel memory that has been
2611  * allocated for the mechanism's parameter.
2612  */
2613 static void
2614 crypto_free_mech(kcf_provider_desc_t *pd, boolean_t allocated_by_crypto_module,
2615     crypto_mechanism_t *mech)
2616 {
2617 	crypto_mech_type_t provider_mech_type;
2618 
2619 	if (allocated_by_crypto_module) {
2620 		if (mech->cm_param != NULL)
2621 			kmem_free(mech->cm_param, mech->cm_param_len);
2622 	} else {
2623 		/* get the provider's mech number */
2624 		provider_mech_type = KCF_TO_PROV_MECHNUM(pd, mech->cm_type);
2625 
2626 		if (mech->cm_param != NULL && mech->cm_param_len != 0) {
2627 			mech->cm_type = provider_mech_type;
2628 			(void) KCF_PROV_FREE_MECH(pd, mech);
2629 		}
2630 	}
2631 }
2632 
2633 /*
2634  * ASSUMPTION: crypto_encrypt_init and crypto_decrypt_init
2635  * structures are identical except for field names.
2636  */
2637 static int
2638 cipher_init(dev_t dev, caddr_t arg, int mode, int (*init)(crypto_provider_t,
2639     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
2640     crypto_ctx_template_t, crypto_context_t *, crypto_call_req_t *))
2641 {
2642 	STRUCT_DECL(crypto_encrypt_init, encrypt_init);
2643 	kcf_provider_desc_t *real_provider = NULL;
2644 	crypto_session_id_t session_id;
2645 	crypto_mechanism_t mech;
2646 	crypto_key_t key;
2647 	crypto_minor_t *cm;
2648 	crypto_session_data_t *sp = NULL;
2649 	crypto_context_t cc;
2650 	crypto_ctx_t **ctxpp;
2651 	size_t mech_rctl_bytes = 0;
2652 	boolean_t mech_rctl_chk = B_FALSE;
2653 	size_t key_rctl_bytes = 0;
2654 	boolean_t key_rctl_chk = B_FALSE;
2655 	int error = 0;
2656 	int rv;
2657 	boolean_t allocated_by_crypto_module = B_FALSE;
2658 	crypto_func_group_t fg;
2659 
2660 	STRUCT_INIT(encrypt_init, mode);
2661 
2662 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
2663 		cmn_err(CE_WARN, "cipher_init: failed holding minor");
2664 		return (ENXIO);
2665 	}
2666 
2667 	if (copyin(arg, STRUCT_BUF(encrypt_init),
2668 	    STRUCT_SIZE(encrypt_init)) != 0) {
2669 		crypto_release_minor(cm);
2670 		return (EFAULT);
2671 	}
2672 
2673 	mech.cm_param = NULL;
2674 	bzero(&key, sizeof (crypto_key_t));
2675 
2676 	session_id = STRUCT_FGET(encrypt_init, ei_session);
2677 
2678 	if (!get_session_ptr(session_id, cm, &sp, &error, &rv)) {
2679 		goto out;
2680 	}
2681 
2682 	bcopy(STRUCT_FADDR(encrypt_init, ei_mech), &mech.cm_type,
2683 	    sizeof (crypto_mech_type_t));
2684 
2685 	if (init == crypto_encrypt_init_prov) {
2686 		fg = CRYPTO_FG_ENCRYPT;
2687 	} else {
2688 		fg = CRYPTO_FG_DECRYPT;
2689 	}
2690 
2691 	/* We need the key length for provider selection so copy it in now. */
2692 	if (!copyin_key(mode, sp, STRUCT_FADDR(encrypt_init, ei_key), &key,
2693 	    &key_rctl_bytes, &key_rctl_chk, &rv, &error)) {
2694 		goto out;
2695 	}
2696 
2697 	if ((rv = kcf_get_hardware_provider(mech.cm_type, &key,
2698 	    CRYPTO_MECH_INVALID, NULL, sp->sd_provider, &real_provider, fg))
2699 	    != CRYPTO_SUCCESS) {
2700 		goto out;
2701 	}
2702 
2703 	rv = crypto_provider_copyin_mech_param(real_provider,
2704 	    STRUCT_FADDR(encrypt_init, ei_mech), &mech, mode, &error);
2705 
2706 	if (rv == CRYPTO_NOT_SUPPORTED) {
2707 		allocated_by_crypto_module = B_TRUE;
2708 		if (!copyin_mech(mode, sp, STRUCT_FADDR(encrypt_init, ei_mech),
2709 		    &mech, &mech_rctl_bytes, &mech_rctl_chk, &rv, &error)) {
2710 			goto out;
2711 		}
2712 	} else {
2713 		if (rv != CRYPTO_SUCCESS)
2714 			goto out;
2715 	}
2716 
2717 	rv = (init)(real_provider, sp->sd_provider_session->ps_session,
2718 	    &mech, &key, NULL, &cc, NULL);
2719 
2720 	/*
2721 	 * Check if a context already exists. If so, it means it is being
2722 	 * abandoned. So, cancel it to avoid leaking it.
2723 	 */
2724 	ctxpp = (init == crypto_encrypt_init_prov) ?
2725 	    &sp->sd_encr_ctx : &sp->sd_decr_ctx;
2726 
2727 	if (*ctxpp != NULL)
2728 		CRYPTO_CANCEL_CTX(ctxpp);
2729 	*ctxpp = (rv == CRYPTO_SUCCESS) ? cc : NULL;
2730 
2731 out:
2732 	CRYPTO_DECREMENT_RCTL_SESSION(sp, mech_rctl_bytes, mech_rctl_chk);
2733 	CRYPTO_DECREMENT_RCTL_SESSION(sp, key_rctl_bytes, key_rctl_chk);
2734 	CRYPTO_SESSION_RELE(sp);
2735 	crypto_release_minor(cm);
2736 
2737 	if (real_provider != NULL) {
2738 		crypto_free_mech(real_provider,
2739 		    allocated_by_crypto_module, &mech);
2740 		KCF_PROV_REFRELE(real_provider);
2741 	}
2742 
2743 	free_crypto_key(&key);
2744 
2745 	if (error != 0)
2746 		/* XXX free context */
2747 		return (error);
2748 
2749 	STRUCT_FSET(encrypt_init, ei_return_value, rv);
2750 	if (copyout(STRUCT_BUF(encrypt_init), arg,
2751 	    STRUCT_SIZE(encrypt_init)) != 0) {
2752 		/* XXX free context */
2753 		return (EFAULT);
2754 	}
2755 	return (0);
2756 }
2757 
2758 /* ARGSUSED */
2759 static int
2760 encrypt(dev_t dev, caddr_t arg, int mode, int *rval)
2761 {
2762 	return (cipher(dev, arg, mode, crypto_encrypt_single));
2763 }
2764 
2765 /* ARGSUSED */
2766 static int
2767 decrypt(dev_t dev, caddr_t arg, int mode, int *rval)
2768 {
2769 	return (cipher(dev, arg, mode, crypto_decrypt_single));
2770 }
2771 
2772 /*
2773  * ASSUMPTION: crypto_encrypt and crypto_decrypt structures
2774  * are identical except for field names.
2775  */
2776 static int
2777 cipher(dev_t dev, caddr_t arg, int mode,
2778     int (*single)(crypto_context_t, crypto_data_t *, crypto_data_t *,
2779     crypto_call_req_t *))
2780 {
2781 	STRUCT_DECL(crypto_encrypt, encrypt);
2782 	crypto_session_id_t session_id;
2783 	crypto_minor_t *cm;
2784 	crypto_session_data_t *sp = NULL;
2785 	crypto_ctx_t **ctxpp;
2786 	crypto_data_t data, encr;
2787 	size_t datalen, encrlen, need = 0;
2788 	boolean_t do_inplace;
2789 	char *encrbuf;
2790 	int error = 0;
2791 	int rv;
2792 	boolean_t rctl_chk = B_FALSE;
2793 
2794 	do_inplace = B_FALSE;
2795 	STRUCT_INIT(encrypt, mode);
2796 
2797 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
2798 		cmn_err(CE_WARN, "cipher: failed holding minor");
2799 		return (ENXIO);
2800 	}
2801 
2802 	if (copyin(arg, STRUCT_BUF(encrypt), STRUCT_SIZE(encrypt)) != 0) {
2803 		crypto_release_minor(cm);
2804 		return (EFAULT);
2805 	}
2806 
2807 	data.cd_raw.iov_base = NULL;
2808 	encr.cd_raw.iov_base = NULL;
2809 
2810 	datalen = STRUCT_FGET(encrypt, ce_datalen);
2811 	encrlen = STRUCT_FGET(encrypt, ce_encrlen);
2812 
2813 	/*
2814 	 * Don't allocate an output buffer unless the buffer pointer is
2815 	 * non-NULL and the buffer length is non-zero.
2816 	 */
2817 	encrbuf = STRUCT_FGETP(encrypt, ce_encrbuf);
2818 	if (encrbuf == NULL || encrlen == 0) {
2819 		encrlen = 0;
2820 	}
2821 
2822 	if (datalen > crypto_max_buffer_len ||
2823 	    encrlen > crypto_max_buffer_len) {
2824 		cmn_err(CE_NOTE, "cipher: buffer greater than %ld bytes, "
2825 		    "pid = %d", crypto_max_buffer_len, curproc->p_pid);
2826 		rv = CRYPTO_ARGUMENTS_BAD;
2827 		goto release_minor;
2828 	}
2829 
2830 	session_id = STRUCT_FGET(encrypt, ce_session);
2831 
2832 	if (!get_session_ptr(session_id, cm, &sp, &error, &rv))  {
2833 		goto release_minor;
2834 	}
2835 
2836 	do_inplace = (STRUCT_FGET(encrypt, ce_flags) &
2837 	    CRYPTO_INPLACE_OPERATION) != 0;
2838 	need = do_inplace ? datalen : datalen + encrlen;
2839 
2840 	if ((rv = CRYPTO_BUFFER_CHECK(sp, need, rctl_chk)) !=
2841 	    CRYPTO_SUCCESS) {
2842 		need = 0;
2843 		goto release_minor;
2844 	}
2845 
2846 	INIT_RAW_CRYPTO_DATA(data, datalen);
2847 	data.cd_miscdata = NULL;
2848 
2849 	if (datalen != 0 && copyin(STRUCT_FGETP(encrypt, ce_databuf),
2850 	    data.cd_raw.iov_base, datalen) != 0) {
2851 		error = EFAULT;
2852 		goto release_minor;
2853 	}
2854 
2855 	if (do_inplace) {
2856 		/* set out = in for in-place */
2857 		encr = data;
2858 	} else {
2859 		INIT_RAW_CRYPTO_DATA(encr, encrlen);
2860 	}
2861 
2862 	ctxpp = (single == crypto_encrypt_single) ?
2863 	    &sp->sd_encr_ctx : &sp->sd_decr_ctx;
2864 
2865 	if (do_inplace)
2866 		/* specify in-place buffers with output = NULL */
2867 		rv = (single)(*ctxpp, &encr, NULL, NULL);
2868 	else
2869 		rv = (single)(*ctxpp, &data, &encr, NULL);
2870 
2871 	if (KCF_CONTEXT_DONE(rv))
2872 		*ctxpp = NULL;
2873 
2874 	if (rv == CRYPTO_SUCCESS) {
2875 		ASSERT(encr.cd_length <= encrlen);
2876 		if (encr.cd_length != 0 && copyout(encr.cd_raw.iov_base,
2877 		    encrbuf, encr.cd_length) != 0) {
2878 			error = EFAULT;
2879 			goto release_minor;
2880 		}
2881 		STRUCT_FSET(encrypt, ce_encrlen,
2882 		    (ulong_t)encr.cd_length);
2883 	}
2884 
2885 	if (rv == CRYPTO_BUFFER_TOO_SMALL) {
2886 		/*
2887 		 * The providers return CRYPTO_BUFFER_TOO_SMALL even for case 1
2888 		 * of section 11.2 of the pkcs11 spec. We catch it here and
2889 		 * provide the correct pkcs11 return value.
2890 		 */
2891 		if (STRUCT_FGETP(encrypt, ce_encrbuf) == NULL)
2892 			rv = CRYPTO_SUCCESS;
2893 		STRUCT_FSET(encrypt, ce_encrlen,
2894 		    (ulong_t)encr.cd_length);
2895 	}
2896 
2897 release_minor:
2898 	CRYPTO_DECREMENT_RCTL_SESSION(sp, need, rctl_chk);
2899 	CRYPTO_SESSION_RELE(sp);
2900 	crypto_release_minor(cm);
2901 
2902 	if (data.cd_raw.iov_base != NULL)
2903 		kmem_free(data.cd_raw.iov_base, datalen);
2904 
2905 	if (!do_inplace && encr.cd_raw.iov_base != NULL)
2906 		kmem_free(encr.cd_raw.iov_base, encrlen);
2907 
2908 	if (error != 0)
2909 		return (error);
2910 
2911 	STRUCT_FSET(encrypt, ce_return_value, rv);
2912 	if (copyout(STRUCT_BUF(encrypt), arg, STRUCT_SIZE(encrypt)) != 0) {
2913 		return (EFAULT);
2914 	}
2915 	return (0);
2916 }
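
/*
 * The CRYPTO_BUFFER_TOO_SMALL handling above gives userland the usual
 * PKCS#11 two-call size negotiation.  Sketch of a caller, assuming the
 * session has already been prepared with the encrypt-init ioctl and that
 * the CRYPTO_ENCRYPT command constant and the 'fd'/'session' variables
 * exist:
 *
 *	crypto_encrypt_t ce;
 *
 *	bzero(&ce, sizeof (ce));
 *	ce.ce_session = session;
 *	ce.ce_datalen = inlen;
 *	ce.ce_databuf = (caddr_t)inbuf;
 *	ce.ce_encrbuf = NULL;			(first call: query size)
 *	(void) ioctl(fd, CRYPTO_ENCRYPT, &ce);
 *	outlen = ce.ce_encrlen;			(required output length)
 *
 *	ce.ce_encrbuf = (caddr_t)outbuf;	(second call: real output)
 *	ce.ce_encrlen = outlen;
 *	(void) ioctl(fd, CRYPTO_ENCRYPT, &ce);
 */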
2917 
2918 /* ARGSUSED */
2919 static int
2920 encrypt_update(dev_t dev, caddr_t arg, int mode, int *rval)
2921 {
2922 	return (cipher_update(dev, arg, mode, crypto_encrypt_update));
2923 }
2924 
2925 /* ARGSUSED */
2926 static int
2927 decrypt_update(dev_t dev, caddr_t arg, int mode, int *rval)
2928 {
2929 	return (cipher_update(dev, arg, mode, crypto_decrypt_update));
2930 }
2931 
2932 /*
2933  * ASSUMPTION: crypto_encrypt_update and crypto_decrypt_update
2934  * structures are identical except for field names.
2935  */
2936 static int
2937 cipher_update(dev_t dev, caddr_t arg, int mode,
2938     int (*update)(crypto_context_t, crypto_data_t *, crypto_data_t *,
2939     crypto_call_req_t *))
2940 {
2941 	STRUCT_DECL(crypto_encrypt_update, encrypt_update);
2942 	crypto_session_id_t session_id;
2943 	crypto_minor_t *cm;
2944 	crypto_session_data_t *sp = NULL;
2945 	crypto_ctx_t **ctxpp;
2946 	crypto_data_t data, encr;
2947 	size_t datalen, encrlen, need = 0;
2948 	boolean_t do_inplace;
2949 	char *encrbuf;
2950 	int error = 0;
2951 	int rv;
2952 	boolean_t rctl_chk = B_FALSE;
2953 
2954 	do_inplace = B_FALSE;
2955 	STRUCT_INIT(encrypt_update, mode);
2956 
2957 	if ((cm = crypto_hold_minor(getminor(dev))) == NULL) {
2958 		cmn_err(CE_WARN, "cipher_update: failed holding minor");
2959 		return (ENXIO);
2960 	}
2961 
2962 	if (copyin(arg, STRUCT_BUF(encrypt_update),
2963 	    STRUCT_SIZE(encrypt_update)) != 0) {
2964 		crypto_release_minor(cm);
2965 		return (EFAULT);
2966 	}
2967 
2968 	data.cd_raw.iov_base = NULL;
2969 	encr.cd_raw.iov_base = NULL;
2970 
2971 	datalen = STRUCT_FGET(encrypt_update, eu_datalen);
2972 	encrlen = STRUCT_FGET(encrypt_update, eu_encrlen);
2973 
2974 	/*
2975 	 * Don't allocate an output buffer unless the buffer pointer is
2976 	 * non-NULL and the buffer length is non-zero.
2977 	 */
2978 	encrbuf = STRUCT_FGETP(encrypt_update, eu_encrbuf);
2979 	if (encrbuf == NULL || encrlen == 0) {
2980 		encrlen = 0;
2981 	}
2982 
2983 	if (datalen > crypto_max_buffer_len ||
2984 	    encrlen > crypto_max_buffer_len) {
2985 		cmn_err(CE_NOTE, "cipher_update: buffer greater than %ld "
2986 		    "bytes, pid = %d", crypto_max_buffer_len, curproc->p_pid);
2987 		rv = CRYPTO_ARGUMENTS_BAD;
2988 		goto out;
2989 	}
2990 
2991 	session_id = STRUCT_FGET(encrypt_update, eu_session);
2992 
2993 	if (!get_session_ptr(session_id, cm, &sp, &error, &rv))  {
2994 		goto out;
2995 	}
2996 
2997 	do_inplace = (STRUCT_FGET(encrypt_update, eu_flags) &
2998 	    CRYPTO_INPLACE_OPERATION) != 0;
2999 	need = do_inplace ? datalen : datalen + encrlen;
3000 
3001 	if ((rv = CRYPTO_BUFFER_CHECK(sp, need, rctl_chk)) !=
3002 	    CRYPTO_SUCCESS) {
3003 		need = 0;
3004 		goto out;
3005 	}
3006 
3007 	INIT_RAW_CRYPTO_DATA(data, datalen);
3008 	data.cd_miscdata = NULL;
3009 
3010 	if (datalen != 0 && copyin(STRUCT_FGETP(encrypt_update, eu_databuf),
3011 	    data.cd_raw.iov_base, datalen) != 0) {
3012 		error = EFAULT;
3013 		goto out;
3014 	}
3015 
3016 	if (do_inplace) {
3017 		/* specify in-place buffers with output = input */
3018 		encr = data;
3019 	} else {
3020 		INIT_RAW_CRYPTO_DATA(encr, encrlen);
3021 	}
3022 
3023 	ctxpp = (update == crypto_encrypt_update) ?
3024 	    &sp->sd_encr_ctx : &sp->sd_decr_ctx;
3025 
3026 	if (do_inplace)
3027 		/* specify in-place buffers with output = NULL */
3028 		rv = (update)(*ctxpp, &encr, NULL, NULL);
3029 	else
3030 		rv = (update)(*ctxpp, &data, &encr, NULL);