1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25/*
26 * Copyright 2019 Nexenta Systems, Inc.  All rights reserved.
27 * Copyright (c) 2013 by Delphix. All rights reserved.
28 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
29 */
30
31#include <sys/conf.h>
32#include <sys/file.h>
33#include <sys/ddi.h>
34#include <sys/sunddi.h>
35#include <sys/modctl.h>
36#include <sys/scsi/scsi.h>
37#include <sys/scsi/generic/persist.h>
38#include <sys/scsi/impl/scsi_reset_notify.h>
39#include <sys/disp.h>
40#include <sys/byteorder.h>
41#include <sys/atomic.h>
42#include <sys/ethernet.h>
43#include <sys/sdt.h>
44#include <sys/nvpair.h>
45#include <sys/zone.h>
46#include <sys/id_space.h>
47
48#include <sys/stmf.h>
49#include <sys/lpif.h>
50#include <sys/portif.h>
51#include <sys/stmf_ioctl.h>
52#include <sys/pppt_ic_if.h>
53
54#include "stmf_impl.h"
55#include "lun_map.h"
56#include "stmf_state.h"
57#include "stmf_stats.h"
58
59/*
60 * Lock order:
61 * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
62 */
63
/* session id source; seeded from lbolt in _init() so ids differ per load */
static uint64_t stmf_session_counter = 0;
/* source of relative target port ids (16-bit) */
static uint16_t stmf_rtpid_counter = 0;
/* start messages at 1 */
static uint64_t stmf_proxy_msg_id = 1;
/* high bit of a proxy msg id; presumably marks task-management msgs ("TM") */
#define	MSG_ID_TM_BIT	0x8000000000000000
/* round i up to the next multiple of 8 */
#define	ALIGNED_TO_8BYTE_BOUNDARY(i)	(((i) + 7) & ~7)

/*
 * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up
 * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman
 * seconds. If it does, we trigger a panic to inform the user of hung I/O
 * blocking us for too long.
 */
boolean_t stmf_io_deadman_enabled = B_TRUE;
int stmf_io_deadman = 1000;			/* seconds */
79
80struct stmf_svc_clocks;
81
82static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
83static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
84static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
85	void **result);
86static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
87static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
88static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
89	cred_t *credp, int *rval);
90static int stmf_get_stmf_state(stmf_state_desc_t *std);
91static int stmf_set_stmf_state(stmf_state_desc_t *std);
92static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
93    char *info);
94static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
95static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
96
97static void stmf_task_audit(stmf_i_scsi_task_t *itask,
98    task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
99
100static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
101static char stmf_ctoi(char c);
102stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
103void stmf_svc_init();
104stmf_status_t stmf_svc_fini();
105void stmf_svc(void *arg);
106static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu);
107void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
108static void stmf_svc_kill_obj_requests(void *obj);
109static void stmf_svc_timeout(struct stmf_svc_clocks *);
110void stmf_check_freetask();
111void stmf_abort_target_reset(scsi_task_t *task);
112stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
113							int target_reset);
114void stmf_target_reset_poll(struct scsi_task *task);
115void stmf_handle_lun_reset(scsi_task_t *task);
116void stmf_handle_target_reset(scsi_task_t *task);
117void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
118int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
119    uint32_t *err_ret);
120int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
121int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
122    uint32_t *err_ret);
123void stmf_delete_ppd(stmf_pp_data_t *ppd);
124void stmf_delete_all_ppds();
125void stmf_trace_clear();
126void stmf_worker_init();
127stmf_status_t stmf_worker_fini();
128void stmf_worker_task(void *arg);
129static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
130static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
131    uint32_t type);
132static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
133static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
134static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
135static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
136void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
137
/* pppt modhandle */
ddi_modhandle_t pppt_mod;

/*
 * pppt modload imported functions.  Function pointers resolved from the
 * pppt (proxy port provider) module.  NOTE(review): the code that resolves
 * these (presumably via ddi_modopen/ddi_modsym on pppt_mod) is outside this
 * view — confirm they are populated before ALUA/proxy paths call them.
 */
stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
stmf_ic_tx_msg_func_t ic_tx_msg;
stmf_ic_msg_free_func_t ic_msg_free;
153
154static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
155static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
156static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
157
158static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
159    stmf_data_buf_t *dbuf);
160static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
161    stmf_data_buf_t *dbuf);
162
163static void stmf_update_kstat_lu_q(scsi_task_t *, void());
164static void stmf_update_kstat_lport_q(scsi_task_t *, void());
165static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
166static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
167static hrtime_t stmf_update_rport_timestamps(hrtime_t *start_tstamp,
168    hrtime_t *done_tstamp, stmf_i_scsi_task_t *itask);
169
170static int stmf_irport_compare(const void *void_irport1,
171    const void *void_irport2);
172static void stmf_create_kstat_rport(stmf_i_remote_port_t *irport);
173static void stmf_destroy_kstat_rport(stmf_i_remote_port_t *irport);
174static int stmf_kstat_rport_update(kstat_t *ksp, int rw);
175static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
176static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
177static stmf_i_remote_port_t *stmf_irport_register(
178    scsi_devid_desc_t *rport_devid);
179static stmf_i_remote_port_t *stmf_irport_lookup_locked(
180    scsi_devid_desc_t *rport_devid);
181static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
182
extern struct mod_ops mod_driverops;

/* =====[ Tunables ]===== */
/* Internal tracing */
volatile int	stmf_trace_on = 0;
volatile int	stmf_trace_buf_size = (1 * 1024 * 1024);
/*
 * The reason default task timeout is 75 is because we want the
 * host to timeout 1st and mostly host timeout is 60 seconds.
 */
volatile int	stmf_default_task_timeout = 75;
/*
 * Setting this to one means, you are responsible for config load and keeping
 * things in sync with persistent database.
 */
volatile int	stmf_allow_modunload = 0;

/* upper bound on the worker thread pool */
volatile int stmf_nworkers = 512;

/* === [ Debugging and fault injection ] === */
#ifdef	DEBUG
volatile int stmf_drop_task_counter = 0;
volatile int stmf_drop_buf_counter = 0;

#endif

stmf_state_t		stmf_state;
static stmf_lu_t	*dlun0;

/*
 * Per-nibble lookup tables (index = 4-bit value): bit position of the
 * first clear/set bit, LSB first; 0xff means no such bit in the nibble.
 */
static uint8_t stmf_first_zero[] =
	{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
	{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };

/* circular internal trace buffer; see _init()/_fini() for lifecycle */
static kmutex_t	trace_buf_lock;
static int	trace_buf_size;
static int	trace_buf_curndx;
caddr_t	stmf_trace_buf;

static enum {
	STMF_WORKERS_DISABLED = 0,
	STMF_WORKERS_ENABLING,
	STMF_WORKERS_ENABLED
} stmf_workers_state = STMF_WORKERS_DISABLED;
/* worker pool bookkeeping; presumably guarded by stmf_worker_sel_mx — verify */
static kmutex_t	stmf_worker_sel_mx;
volatile uint32_t stmf_nworkers_cur = 0; /* # of workers currently running */
static int stmf_worker_sel_counter = 0;
static uint32_t stmf_cur_ntasks = 0;
static clock_t stmf_wm_next = 0;
static int stmf_nworkers_accepting_cmds;
static stmf_worker_t *stmf_workers = NULL;
static clock_t stmf_worker_scale_down_timer = 0;
static int stmf_worker_scale_down_qd = 0;
236
/*
 * Character device switch table for the STMF admin node.  Only open, close
 * and ioctl are implemented; every other entry point is nodev/nochpoll.
 */
static struct cb_ops stmf_cb_ops = {
	stmf_open,			/* open */
	stmf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	stmf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
257
/*
 * Device operations.  STMF implements attach/detach/getinfo and routes
 * character-device access through stmf_cb_ops; there are no bus or power
 * management entry points.
 */
static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,
	stmf_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	stmf_attach,
	stmf_detach,
	nodev,			/* reset */
	&stmf_cb_ops,
	NULL,			/* bus_ops */
	NULL			/* power */
};
271
#define	STMF_NAME		"COMSTAR STMF"
#define	STMF_MODULE_NAME	"stmf"

/* Loadable-module linkage: a single driver module (see _init/_fini/_info). */
static struct modldrv modldrv = {
	&mod_driverops,
	STMF_NAME,
	&stmf_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
286
/*
 * Loadable module entry point.  Installs the module, then sets up all
 * global STMF state: trace buffer, locks, default lu/lport states, the
 * remote-port AVL tree, instance id spaces, and the view/svc/dlun
 * subsystems.
 *
 * NOTE(review): mod_install() runs before any global state (stmf_state,
 * its mutexes, the trace buffer) is initialized.  If attach/open can race
 * in here they would touch uninitialized state — confirm whether the
 * framework serializes this, or whether initialization should precede
 * mod_install() with teardown on failure.
 */
int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);
	/* internal trace buffer (see stmf_trace_on / stmf_trace_buf_size) */
	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
	trace_buf_size = stmf_trace_buf_size;
	trace_buf_curndx = 0;
	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
	mutex_init(&stmf_worker_sel_mx, NULL, MUTEX_ADAPTIVE, 0);
	bzero(&stmf_state, sizeof (stmf_state_t));
	/* STMF service is off by default */
	stmf_state.stmf_service_running = 0;
	/* default lu/lport states are online */
	stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
	stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	/* seed session ids from lbolt so they differ across module reloads */
	stmf_session_counter = (uint64_t)ddi_get_lbolt();
	avl_create(&stmf_state.stmf_irportlist,
	    stmf_irport_compare, sizeof (stmf_i_remote_port_t),
	    offsetof(stmf_i_remote_port_t, irport_ln));
	stmf_state.stmf_ilport_inst_space =
	    id_space_create("lport-instances", 0, MAX_ILPORT);
	stmf_state.stmf_irport_inst_space =
	    id_space_create("rport-instances", 0, MAX_IRPORT);
	stmf_view_init();
	stmf_svc_init();
	stmf_dlun_init();
	return (ret);
}
321
/*
 * Loadable module unload entry point.  Refuses to unload (EBUSY) while the
 * STMF service is running, while a configuration exists (unless the
 * stmf_allow_modunload tunable is set), while providers appear registered
 * (stmf_nlps/stmf_npps nonzero — presumably lu/port provider counts), or
 * while dlun/worker/svc teardown fails.  Each failed teardown step
 * re-initializes whatever was already torn down so the module stays usable.
 */
int
_fini(void)
{
	int ret;
	stmf_i_remote_port_t	*irport;
	void			*avl_dest_cookie = NULL;

	if (stmf_state.stmf_service_running)
		return (EBUSY);
	if ((!stmf_allow_modunload) &&
	    (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
		return (EBUSY);
	}
	if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
		return (EBUSY);
	}
	if (stmf_dlun_fini() != STMF_SUCCESS)
		return (EBUSY);
	if (stmf_worker_fini() != STMF_SUCCESS) {
		stmf_dlun_init();
		return (EBUSY);
	}
	if (stmf_svc_fini() != STMF_SUCCESS) {
		stmf_dlun_init();
		stmf_worker_init();
		return (EBUSY);
	}

	ret = mod_remove(&modlinkage);
	if (ret) {
		/* module is still in use; bring the subsystems back up */
		stmf_svc_init();
		stmf_dlun_init();
		stmf_worker_init();
		return (ret);
	}

	stmf_view_clear_config();

	/* tear down the remote-port tree, id spaces and global locks */
	while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
	    &avl_dest_cookie)) != NULL)
		stmf_irport_destroy(irport);
	avl_destroy(&stmf_state.stmf_irportlist);
	id_space_destroy(stmf_state.stmf_ilport_inst_space);
	id_space_destroy(stmf_state.stmf_irport_inst_space);

	kmem_free(stmf_trace_buf, stmf_trace_buf_size);
	mutex_destroy(&trace_buf_lock);
	mutex_destroy(&stmf_state.stmf_lock);
	mutex_destroy(&stmf_worker_sel_mx);
	cv_destroy(&stmf_state.stmf_cv);
	return (ret);
}
374
375int
376_info(struct modinfo *modinfop)
377{
378	return (mod_info(&modlinkage, modinfop));
379}
380
381/* ARGSUSED */
382static int
383stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
384{
385	switch (cmd) {
386	case DDI_INFO_DEVT2DEVINFO:
387		*result = stmf_state.stmf_dip;
388		break;
389	case DDI_INFO_DEVT2INSTANCE:
390		*result =
391		    (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
392		break;
393	default:
394		return (DDI_FAILURE);
395	}
396
397	return (DDI_SUCCESS);
398}
399
400static int
401stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
402{
403	switch (cmd) {
404	case DDI_ATTACH:
405		stmf_state.stmf_dip = dip;
406
407		if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
408		    DDI_NT_STMF, 0) != DDI_SUCCESS) {
409			break;
410		}
411		ddi_report_dev(dip);
412		return (DDI_SUCCESS);
413	}
414
415	return (DDI_FAILURE);
416}
417
418static int
419stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
420{
421	switch (cmd) {
422	case DDI_DETACH:
423		ddi_remove_minor_node(dip, 0);
424		return (DDI_SUCCESS);
425	}
426
427	return (DDI_FAILURE);
428}
429
430/* ARGSUSED */
431static int
432stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
433{
434	mutex_enter(&stmf_state.stmf_lock);
435	if (stmf_state.stmf_exclusive_open) {
436		mutex_exit(&stmf_state.stmf_lock);
437		return (EBUSY);
438	}
439	if (flag & FEXCL) {
440		if (stmf_state.stmf_opened) {
441			mutex_exit(&stmf_state.stmf_lock);
442			return (EBUSY);
443		}
444		stmf_state.stmf_exclusive_open = 1;
445	}
446	stmf_state.stmf_opened = 1;
447	mutex_exit(&stmf_state.stmf_lock);
448	return (0);
449}
450
451/* ARGSUSED */
452static int
453stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
454{
455	mutex_enter(&stmf_state.stmf_lock);
456	stmf_state.stmf_opened = 0;
457	if (stmf_state.stmf_exclusive_open &&
458	    (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
459		stmf_state.stmf_config_state = STMF_CONFIG_NONE;
460		stmf_delete_all_ppds();
461		stmf_view_clear_config();
462		stmf_view_init();
463	}
464	stmf_state.stmf_exclusive_open = 0;
465	mutex_exit(&stmf_state.stmf_lock);
466	return (0);
467}
468
469int
470stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
471    void **ibuf, void **obuf)
472{
473	int ret;
474
475	*ibuf = NULL;
476	*obuf = NULL;
477	*iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
478
479	ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
480	if (ret)
481		return (EFAULT);
482	if ((*iocd)->stmf_version != STMF_VERSION_1) {
483		ret = EINVAL;
484		goto copyin_iocdata_done;
485	}
486	if ((*iocd)->stmf_ibuf_size) {
487		*ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
488		ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
489		    *ibuf, (*iocd)->stmf_ibuf_size, mode);
490	}
491	if ((*iocd)->stmf_obuf_size)
492		*obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
493
494	if (ret == 0)
495		return (0);
496	ret = EFAULT;
497copyin_iocdata_done:;
498	if (*obuf) {
499		kmem_free(*obuf, (*iocd)->stmf_obuf_size);
500		*obuf = NULL;
501	}
502	if (*ibuf) {
503		kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
504		*ibuf = NULL;
505	}
506	kmem_free(*iocd, sizeof (stmf_iocdata_t));
507	return (ret);
508}
509
510int
511stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
512{
513	int ret;
514
515	if (iocd->stmf_obuf_size) {
516		ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
517		    iocd->stmf_obuf_size, mode);
518		if (ret)
519			return (EFAULT);
520	}
521	ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
522	if (ret)
523		return (EFAULT);
524	return (0);
525}
526
527/* ARGSUSED */
528static int
529stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
530    cred_t *credp, int *rval)
531{
532	stmf_iocdata_t *iocd;
533	void *ibuf = NULL, *obuf = NULL;
534	slist_lu_t *luid_list;
535	slist_target_port_t *lportid_list;
536	stmf_i_lu_t *ilu;
537	stmf_i_local_port_t *ilport;
538	stmf_i_scsi_session_t *iss;
539	slist_scsi_session_t *iss_list;
540	sioc_lu_props_t *lup;
541	sioc_target_port_props_t *lportp;
542	stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
543	uint64_t *ppi_token = NULL;
544	uint8_t *p_id, *id;
545	stmf_state_desc_t *std;
546	stmf_status_t ctl_ret;
547	stmf_state_change_info_t ssi;
548	int ret = 0;
549	uint32_t n;
550	int i;
551	stmf_group_op_data_t *grp_entry;
552	stmf_group_name_t *grpname;
553	stmf_view_op_entry_t *ve;
554	stmf_id_type_t idtype;
555	stmf_id_data_t *id_entry;
556	stmf_id_list_t	*id_list;
557	stmf_view_entry_t *view_entry;
558	stmf_set_props_t *stmf_set_props;
559	uint32_t	veid;
560	if ((cmd & 0xff000000) != STMF_IOCTL) {
561		return (ENOTTY);
562	}
563
564	if (drv_priv(credp) != 0) {
565		return (EPERM);
566	}
567
568	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
569	if (ret)
570		return (ret);
571	iocd->stmf_error = 0;
572
573	switch (cmd) {
574	case STMF_IOCTL_LU_LIST:
575		/* retrieves both registered/unregistered */
576		mutex_enter(&stmf_state.stmf_lock);
577		id_list = &stmf_state.stmf_luid_list;
578		n = min(id_list->id_count,
579		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
580		iocd->stmf_obuf_max_nentries = id_list->id_count;
581		luid_list = (slist_lu_t *)obuf;
582		id_entry = id_list->idl_head;
583		for (i = 0; i < n; i++) {
584			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
585			id_entry = id_entry->id_next;
586		}
587
588		n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
589		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
590			id = (uint8_t *)ilu->ilu_lu->lu_id;
591			if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
592				iocd->stmf_obuf_max_nentries++;
593				if (i < n) {
594					bcopy(id + 4, luid_list[i].lu_guid,
595					    sizeof (slist_lu_t));
596					i++;
597				}
598			}
599		}
600		iocd->stmf_obuf_nentries = i;
601		mutex_exit(&stmf_state.stmf_lock);
602		break;
603
604	case STMF_IOCTL_REG_LU_LIST:
605		mutex_enter(&stmf_state.stmf_lock);
606		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
607		n = min(stmf_state.stmf_nlus,
608		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
609		iocd->stmf_obuf_nentries = n;
610		ilu = stmf_state.stmf_ilulist;
611		luid_list = (slist_lu_t *)obuf;
612		for (i = 0; i < n; i++) {
613			uint8_t *id;
614			id = (uint8_t *)ilu->ilu_lu->lu_id;
615			bcopy(id + 4, luid_list[i].lu_guid, 16);
616			ilu = ilu->ilu_next;
617		}
618		mutex_exit(&stmf_state.stmf_lock);
619		break;
620
621	case STMF_IOCTL_VE_LU_LIST:
622		mutex_enter(&stmf_state.stmf_lock);
623		id_list = &stmf_state.stmf_luid_list;
624		n = min(id_list->id_count,
625		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
626		iocd->stmf_obuf_max_nentries = id_list->id_count;
627		iocd->stmf_obuf_nentries = n;
628		luid_list = (slist_lu_t *)obuf;
629		id_entry = id_list->idl_head;
630		for (i = 0; i < n; i++) {
631			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
632			id_entry = id_entry->id_next;
633		}
634		mutex_exit(&stmf_state.stmf_lock);
635		break;
636
637	case STMF_IOCTL_TARGET_PORT_LIST:
638		mutex_enter(&stmf_state.stmf_lock);
639		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
640		n = min(stmf_state.stmf_nlports,
641		    (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
642		iocd->stmf_obuf_nentries = n;
643		ilport = stmf_state.stmf_ilportlist;
644		lportid_list = (slist_target_port_t *)obuf;
645		for (i = 0; i < n; i++) {
646			uint8_t *id;
647			id = (uint8_t *)ilport->ilport_lport->lport_id;
648			bcopy(id, lportid_list[i].target, id[3] + 4);
649			ilport = ilport->ilport_next;
650		}
651		mutex_exit(&stmf_state.stmf_lock);
652		break;
653
654	case STMF_IOCTL_SESSION_LIST:
655		p_id = (uint8_t *)ibuf;
656		if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
657		    (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
658			ret = EINVAL;
659			break;
660		}
661		mutex_enter(&stmf_state.stmf_lock);
662		for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
663		    ilport->ilport_next) {
664			uint8_t *id;
665			id = (uint8_t *)ilport->ilport_lport->lport_id;
666			if ((p_id[3] == id[3]) &&
667			    (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
668				break;
669			}
670		}
671		if (ilport == NULL) {
672			mutex_exit(&stmf_state.stmf_lock);
673			ret = ENOENT;
674			break;
675		}
676		iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
677		n = min(ilport->ilport_nsessions,
678		    (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
679		iocd->stmf_obuf_nentries = n;
680		iss = ilport->ilport_ss_list;
681		iss_list = (slist_scsi_session_t *)obuf;
682		for (i = 0; i < n; i++) {
683			uint8_t *id;
684			id = (uint8_t *)iss->iss_ss->ss_rport_id;
685			bcopy(id, iss_list[i].initiator, id[3] + 4);
686			iss_list[i].creation_time = (uint32_t)
687			    iss->iss_creation_time;
688			if (iss->iss_ss->ss_rport_alias) {
689				(void) strncpy(iss_list[i].alias,
690				    iss->iss_ss->ss_rport_alias, 255);
691				iss_list[i].alias[255] = '\0';
692			} else {
693				iss_list[i].alias[0] = '\0';
694			}
695			iss = iss->iss_next;
696		}
697		mutex_exit(&stmf_state.stmf_lock);
698		break;
699
700	case STMF_IOCTL_GET_LU_PROPERTIES:
701		p_id = (uint8_t *)ibuf;
702		if ((iocd->stmf_ibuf_size < 16) ||
703		    (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
704		    (p_id[0] == 0)) {
705			ret = EINVAL;
706			break;
707		}
708		mutex_enter(&stmf_state.stmf_lock);
709		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
710			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
711				break;
712		}
713		if (ilu == NULL) {
714			mutex_exit(&stmf_state.stmf_lock);
715			ret = ENOENT;
716			break;
717		}
718		lup = (sioc_lu_props_t *)obuf;
719		bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
720		lup->lu_state = ilu->ilu_state & 0x0f;
721		lup->lu_present = 1; /* XXX */
722		(void) strncpy(lup->lu_provider_name,
723		    ilu->ilu_lu->lu_lp->lp_name, 255);
724		lup->lu_provider_name[254] = '\0';
725		if (ilu->ilu_lu->lu_alias) {
726			(void) strncpy(lup->lu_alias,
727			    ilu->ilu_lu->lu_alias, 255);
728			lup->lu_alias[255] = '\0';
729		} else {
730			lup->lu_alias[0] = '\0';
731		}
732		mutex_exit(&stmf_state.stmf_lock);
733		break;
734
735	case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
736		p_id = (uint8_t *)ibuf;
737		if ((p_id == NULL) ||
738		    (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
739		    (iocd->stmf_obuf_size <
740		    sizeof (sioc_target_port_props_t))) {
741			ret = EINVAL;
742			break;
743		}
744		mutex_enter(&stmf_state.stmf_lock);
745		for (ilport = stmf_state.stmf_ilportlist; ilport;
746		    ilport = ilport->ilport_next) {
747			uint8_t *id;
748			id = (uint8_t *)ilport->ilport_lport->lport_id;
749			if ((p_id[3] == id[3]) &&
750			    (bcmp(p_id+4, id+4, id[3]) == 0))
751				break;
752		}
753		if (ilport == NULL) {
754			mutex_exit(&stmf_state.stmf_lock);
755			ret = ENOENT;
756			break;
757		}
758		lportp = (sioc_target_port_props_t *)obuf;
759		bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
760		    ilport->ilport_lport->lport_id->ident_length + 4);
761		lportp->tgt_state = ilport->ilport_state & 0x0f;
762		lportp->tgt_present = 1; /* XXX */
763		(void) strncpy(lportp->tgt_provider_name,
764		    ilport->ilport_lport->lport_pp->pp_name, 255);
765		lportp->tgt_provider_name[254] = '\0';
766		if (ilport->ilport_lport->lport_alias) {
767			(void) strncpy(lportp->tgt_alias,
768			    ilport->ilport_lport->lport_alias, 255);
769			lportp->tgt_alias[255] = '\0';
770		} else {
771			lportp->tgt_alias[0] = '\0';
772		}
773		mutex_exit(&stmf_state.stmf_lock);
774		break;
775
776	case STMF_IOCTL_SET_STMF_STATE:
777		if ((ibuf == NULL) ||
778		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
779			ret = EINVAL;
780			break;
781		}
782		ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
783		break;
784
785	case STMF_IOCTL_GET_STMF_STATE:
786		if ((obuf == NULL) ||
787		    (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
788			ret = EINVAL;
789			break;
790		}
791		ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
792		break;
793
794	case STMF_IOCTL_SET_ALUA_STATE:
795		if ((ibuf == NULL) ||
796		    (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
797			ret = EINVAL;
798			break;
799		}
800		ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
801		break;
802
803	case STMF_IOCTL_GET_ALUA_STATE:
804		if ((obuf == NULL) ||
805		    (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
806			ret = EINVAL;
807			break;
808		}
809		stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
810		break;
811
812	case STMF_IOCTL_SET_LU_STATE:
813		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
814		ssi.st_additional_info = NULL;
815		std = (stmf_state_desc_t *)ibuf;
816		if ((ibuf == NULL) ||
817		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
818			ret = EINVAL;
819			break;
820		}
821		p_id = std->ident;
822		mutex_enter(&stmf_state.stmf_lock);
823		if (stmf_state.stmf_inventory_locked) {
824			mutex_exit(&stmf_state.stmf_lock);
825			ret = EBUSY;
826			break;
827		}
828		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
829			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
830				break;
831		}
832		if (ilu == NULL) {
833			mutex_exit(&stmf_state.stmf_lock);
834			ret = ENOENT;
835			break;
836		}
837		stmf_state.stmf_inventory_locked = 1;
838		mutex_exit(&stmf_state.stmf_lock);
839		cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
840		    STMF_CMD_LU_OFFLINE;
841		ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
842		if (ctl_ret == STMF_ALREADY)
843			ret = 0;
844		else if (ctl_ret == STMF_BUSY)
845			ret = EBUSY;
846		else if (ctl_ret != STMF_SUCCESS)
847			ret = EIO;
848		mutex_enter(&stmf_state.stmf_lock);
849		stmf_state.stmf_inventory_locked = 0;
850		mutex_exit(&stmf_state.stmf_lock);
851		break;
852
853	case STMF_IOCTL_SET_STMF_PROPS:
854		if ((ibuf == NULL) ||
855		    (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
856			ret = EINVAL;
857			break;
858		}
859		stmf_set_props = (stmf_set_props_t *)ibuf;
860		mutex_enter(&stmf_state.stmf_lock);
861		if ((stmf_set_props->default_lu_state_value ==
862		    STMF_STATE_OFFLINE) ||
863		    (stmf_set_props->default_lu_state_value ==
864		    STMF_STATE_ONLINE)) {
865			stmf_state.stmf_default_lu_state =
866			    stmf_set_props->default_lu_state_value;
867		}
868		if ((stmf_set_props->default_target_state_value ==
869		    STMF_STATE_OFFLINE) ||
870		    (stmf_set_props->default_target_state_value ==
871		    STMF_STATE_ONLINE)) {
872			stmf_state.stmf_default_lport_state =
873			    stmf_set_props->default_target_state_value;
874		}
875
876		mutex_exit(&stmf_state.stmf_lock);
877		break;
878
879	case STMF_IOCTL_SET_TARGET_PORT_STATE:
880		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
881		ssi.st_additional_info = NULL;
882		std = (stmf_state_desc_t *)ibuf;
883		if ((ibuf == NULL) ||
884		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
885			ret = EINVAL;
886			break;
887		}
888		p_id = std->ident;
889		mutex_enter(&stmf_state.stmf_lock);
890		if (stmf_state.stmf_inventory_locked) {
891			mutex_exit(&stmf_state.stmf_lock);
892			ret = EBUSY;
893			break;
894		}
895		for (ilport = stmf_state.stmf_ilportlist; ilport;
896		    ilport = ilport->ilport_next) {
897			uint8_t *id;
898			id = (uint8_t *)ilport->ilport_lport->lport_id;
899			if ((id[3] == p_id[3]) &&
900			    (bcmp(id+4, p_id+4, id[3]) == 0)) {
901				break;
902			}
903		}
904		if (ilport == NULL) {
905			mutex_exit(&stmf_state.stmf_lock);
906			ret = ENOENT;
907			break;
908		}
909		stmf_state.stmf_inventory_locked = 1;
910		mutex_exit(&stmf_state.stmf_lock);
911		cmd = (std->state == STMF_STATE_ONLINE) ?
912		    STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
913		ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
914		if (ctl_ret == STMF_ALREADY)
915			ret = 0;
916		else if (ctl_ret == STMF_BUSY)
917			ret = EBUSY;
918		else if (ctl_ret != STMF_SUCCESS)
919			ret = EIO;
920		mutex_enter(&stmf_state.stmf_lock);
921		stmf_state.stmf_inventory_locked = 0;
922		mutex_exit(&stmf_state.stmf_lock);
923		break;
924
925	case STMF_IOCTL_ADD_HG_ENTRY:
926		idtype = STMF_ID_TYPE_HOST;
927		/* FALLTHROUGH */
928	case STMF_IOCTL_ADD_TG_ENTRY:
929		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
930			ret = EACCES;
931			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
932			break;
933		}
934		if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
935			idtype = STMF_ID_TYPE_TARGET;
936		}
937		grp_entry = (stmf_group_op_data_t *)ibuf;
938		if ((ibuf == NULL) ||
939		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
940			ret = EINVAL;
941			break;
942		}
943		if (grp_entry->group.name[0] == '*') {
944			ret = EINVAL;
945			break; /* not allowed */
946		}
947		mutex_enter(&stmf_state.stmf_lock);
948		ret = stmf_add_group_member(grp_entry->group.name,
949		    grp_entry->group.name_size,
950		    grp_entry->ident + 4,
951		    grp_entry->ident[3],
952		    idtype,
953		    &iocd->stmf_error);
954		mutex_exit(&stmf_state.stmf_lock);
955		break;
956	case STMF_IOCTL_REMOVE_HG_ENTRY:
957		idtype = STMF_ID_TYPE_HOST;
958		/* FALLTHROUGH */
959	case STMF_IOCTL_REMOVE_TG_ENTRY:
960		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
961			ret = EACCES;
962			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
963			break;
964		}
965		if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
966			idtype = STMF_ID_TYPE_TARGET;
967		}
968		grp_entry = (stmf_group_op_data_t *)ibuf;
969		if ((ibuf == NULL) ||
970		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
971			ret = EINVAL;
972			break;
973		}
974		if (grp_entry->group.name[0] == '*') {
975			ret = EINVAL;
976			break; /* not allowed */
977		}
978		mutex_enter(&stmf_state.stmf_lock);
979		ret = stmf_remove_group_member(grp_entry->group.name,
980		    grp_entry->group.name_size,
981		    grp_entry->ident + 4,
982		    grp_entry->ident[3],
983		    idtype,
984		    &iocd->stmf_error);
985		mutex_exit(&stmf_state.stmf_lock);
986		break;
987	case STMF_IOCTL_CREATE_HOST_GROUP:
988		idtype = STMF_ID_TYPE_HOST_GROUP;
989		/* FALLTHROUGH */
990	case STMF_IOCTL_CREATE_TARGET_GROUP:
991		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
992			ret = EACCES;
993			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
994			break;
995		}
996		grpname = (stmf_group_name_t *)ibuf;
997
998		if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
999			idtype = STMF_ID_TYPE_TARGET_GROUP;
1000		if ((ibuf == NULL) ||
1001		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1002			ret = EINVAL;
1003			break;
1004		}
1005		if (grpname->name[0] == '*') {
1006			ret = EINVAL;
1007			break; /* not allowed */
1008		}
1009		mutex_enter(&stmf_state.stmf_lock);
1010		ret = stmf_add_group(grpname->name,
1011		    grpname->name_size, idtype, &iocd->stmf_error);
1012		mutex_exit(&stmf_state.stmf_lock);
1013		break;
1014	case STMF_IOCTL_REMOVE_HOST_GROUP:
1015		idtype = STMF_ID_TYPE_HOST_GROUP;
1016		/* FALLTHROUGH */
1017	case STMF_IOCTL_REMOVE_TARGET_GROUP:
1018		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1019			ret = EACCES;
1020			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1021			break;
1022		}
1023		grpname = (stmf_group_name_t *)ibuf;
1024		if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1025			idtype = STMF_ID_TYPE_TARGET_GROUP;
1026		if ((ibuf == NULL) ||
1027		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1028			ret = EINVAL;
1029			break;
1030		}
1031		if (grpname->name[0] == '*') {
1032			ret = EINVAL;
1033			break; /* not allowed */
1034		}
1035		mutex_enter(&stmf_state.stmf_lock);
1036		ret = stmf_remove_group(grpname->name,
1037		    grpname->name_size, idtype, &iocd->stmf_error);
1038		mutex_exit(&stmf_state.stmf_lock);
1039		break;
1040	case STMF_IOCTL_VALIDATE_VIEW:
1041	case STMF_IOCTL_ADD_VIEW_ENTRY:
1042		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1043			ret = EACCES;
1044			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1045			break;
1046		}
1047		ve = (stmf_view_op_entry_t *)ibuf;
1048		if ((ibuf == NULL) ||
1049		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1050			ret = EINVAL;
1051			break;
1052		}
1053		if (!ve->ve_lu_number_valid)
1054			ve->ve_lu_nbr[2] = 0xFF;
1055		if (ve->ve_all_hosts) {
1056			ve->ve_host_group.name[0] = '*';
1057			ve->ve_host_group.name_size = 1;
1058		}
1059		if (ve->ve_all_targets) {
1060			ve->ve_target_group.name[0] = '*';
1061			ve->ve_target_group.name_size = 1;
1062		}
1063		if (ve->ve_ndx_valid)
1064			veid = ve->ve_ndx;
1065		else
1066			veid = 0xffffffff;
1067		mutex_enter(&stmf_state.stmf_lock);
1068		if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1069			ret = stmf_add_ve(ve->ve_host_group.name,
1070			    ve->ve_host_group.name_size,
1071			    ve->ve_target_group.name,
1072			    ve->ve_target_group.name_size,
1073			    ve->ve_guid,
1074			    &veid,
1075			    ve->ve_lu_nbr,
1076			    &iocd->stmf_error);
1077		} else {  /* STMF_IOCTL_VALIDATE_VIEW */
1078			ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1079			    ve->ve_host_group.name_size,
1080			    ve->ve_target_group.name,
1081			    ve->ve_target_group.name_size,
1082			    ve->ve_lu_nbr,
1083			    &iocd->stmf_error);
1084		}
1085		mutex_exit(&stmf_state.stmf_lock);
1086		if (ret == 0 &&
1087		    (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1088		    iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1089			stmf_view_op_entry_t *ve_ret =
1090			    (stmf_view_op_entry_t *)obuf;
1091			iocd->stmf_obuf_nentries = 1;
1092			iocd->stmf_obuf_max_nentries = 1;
1093			if (!ve->ve_ndx_valid) {
1094				ve_ret->ve_ndx = veid;
1095				ve_ret->ve_ndx_valid = 1;
1096			}
1097			if (!ve->ve_lu_number_valid) {
1098				ve_ret->ve_lu_number_valid = 1;
1099				bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1100			}
1101		}
1102		break;
1103	case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1104		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1105			ret = EACCES;
1106			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1107			break;
1108		}
1109		ve = (stmf_view_op_entry_t *)ibuf;
1110		if ((ibuf == NULL) ||
1111		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1112			ret = EINVAL;
1113			break;
1114		}
1115		if (!ve->ve_ndx_valid) {
1116			ret = EINVAL;
1117			break;
1118		}
1119		mutex_enter(&stmf_state.stmf_lock);
1120		ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1121		    &iocd->stmf_error);
1122		mutex_exit(&stmf_state.stmf_lock);
1123		break;
1124	case STMF_IOCTL_GET_HG_LIST:
1125		id_list = &stmf_state.stmf_hg_list;
1126		/* FALLTHROUGH */
1127	case STMF_IOCTL_GET_TG_LIST:
1128		if (cmd == STMF_IOCTL_GET_TG_LIST)
1129			id_list = &stmf_state.stmf_tg_list;
1130		mutex_enter(&stmf_state.stmf_lock);
1131		iocd->stmf_obuf_max_nentries = id_list->id_count;
1132		n = min(id_list->id_count,
1133		    (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1134		iocd->stmf_obuf_nentries = n;
1135		id_entry = id_list->idl_head;
1136		grpname = (stmf_group_name_t *)obuf;
1137		for (i = 0; i < n; i++) {
1138			if (id_entry->id_data[0] == '*') {
1139				if (iocd->stmf_obuf_nentries > 0) {
1140					iocd->stmf_obuf_nentries--;
1141				}
1142				id_entry = id_entry->id_next;
1143				continue;
1144			}
1145			grpname->name_size = id_entry->id_data_size;
1146			bcopy(id_entry->id_data, grpname->name,
1147			    id_entry->id_data_size);
1148			grpname++;
1149			id_entry = id_entry->id_next;
1150		}
1151		mutex_exit(&stmf_state.stmf_lock);
1152		break;
1153	case STMF_IOCTL_GET_HG_ENTRIES:
1154		id_list = &stmf_state.stmf_hg_list;
1155		/* FALLTHROUGH */
1156	case STMF_IOCTL_GET_TG_ENTRIES:
1157		grpname = (stmf_group_name_t *)ibuf;
1158		if ((ibuf == NULL) ||
1159		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1160			ret = EINVAL;
1161			break;
1162		}
1163		if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1164			id_list = &stmf_state.stmf_tg_list;
1165		}
1166		mutex_enter(&stmf_state.stmf_lock);
1167		id_entry = stmf_lookup_id(id_list, grpname->name_size,
1168		    grpname->name);
1169		if (!id_entry)
1170			ret = ENODEV;
1171		else {
1172			stmf_ge_ident_t *grp_entry;
1173			id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1174			iocd->stmf_obuf_max_nentries = id_list->id_count;
1175			n = min(id_list->id_count,
1176			    iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1177			iocd->stmf_obuf_nentries = n;
1178			id_entry = id_list->idl_head;
1179			grp_entry = (stmf_ge_ident_t *)obuf;
1180			for (i = 0; i < n; i++) {
1181				bcopy(id_entry->id_data, grp_entry->ident,
1182				    id_entry->id_data_size);
1183				grp_entry->ident_size = id_entry->id_data_size;
1184				id_entry = id_entry->id_next;
1185				grp_entry++;
1186			}
1187		}
1188		mutex_exit(&stmf_state.stmf_lock);
1189		break;
1190
1191	case STMF_IOCTL_GET_VE_LIST:
1192		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1193		mutex_enter(&stmf_state.stmf_lock);
1194		ve = (stmf_view_op_entry_t *)obuf;
1195		for (id_entry = stmf_state.stmf_luid_list.idl_head;
1196		    id_entry; id_entry = id_entry->id_next) {
1197			for (view_entry = (stmf_view_entry_t *)
1198			    id_entry->id_impl_specific; view_entry;
1199			    view_entry = view_entry->ve_next) {
1200				iocd->stmf_obuf_max_nentries++;
1201				if (iocd->stmf_obuf_nentries >= n)
1202					continue;
1203				ve->ve_ndx_valid = 1;
1204				ve->ve_ndx = view_entry->ve_id;
1205				ve->ve_lu_number_valid = 1;
1206				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1207				bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1208				    view_entry->ve_luid->id_data_size);
1209				if (view_entry->ve_hg->id_data[0] == '*') {
1210					ve->ve_all_hosts = 1;
1211				} else {
1212					bcopy(view_entry->ve_hg->id_data,
1213					    ve->ve_host_group.name,
1214					    view_entry->ve_hg->id_data_size);
1215					ve->ve_host_group.name_size =
1216					    view_entry->ve_hg->id_data_size;
1217				}
1218
1219				if (view_entry->ve_tg->id_data[0] == '*') {
1220					ve->ve_all_targets = 1;
1221				} else {
1222					bcopy(view_entry->ve_tg->id_data,
1223					    ve->ve_target_group.name,
1224					    view_entry->ve_tg->id_data_size);
1225					ve->ve_target_group.name_size =
1226					    view_entry->ve_tg->id_data_size;
1227				}
1228				ve++;
1229				iocd->stmf_obuf_nentries++;
1230			}
1231		}
1232		mutex_exit(&stmf_state.stmf_lock);
1233		break;
1234
1235	case STMF_IOCTL_LU_VE_LIST:
1236		p_id = (uint8_t *)ibuf;
1237		if ((iocd->stmf_ibuf_size != 16) ||
1238		    (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1239			ret = EINVAL;
1240			break;
1241		}
1242
1243		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1244		mutex_enter(&stmf_state.stmf_lock);
1245		ve = (stmf_view_op_entry_t *)obuf;
1246		for (id_entry = stmf_state.stmf_luid_list.idl_head;
1247		    id_entry; id_entry = id_entry->id_next) {
1248			if (bcmp(id_entry->id_data, p_id, 16) != 0)
1249				continue;
1250			for (view_entry = (stmf_view_entry_t *)
1251			    id_entry->id_impl_specific; view_entry;
1252			    view_entry = view_entry->ve_next) {
1253				iocd->stmf_obuf_max_nentries++;
1254				if (iocd->stmf_obuf_nentries >= n)
1255					continue;
1256				ve->ve_ndx_valid = 1;
1257				ve->ve_ndx = view_entry->ve_id;
1258				ve->ve_lu_number_valid = 1;
1259				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1260				bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1261				    view_entry->ve_luid->id_data_size);
1262				if (view_entry->ve_hg->id_data[0] == '*') {
1263					ve->ve_all_hosts = 1;
1264				} else {
1265					bcopy(view_entry->ve_hg->id_data,
1266					    ve->ve_host_group.name,
1267					    view_entry->ve_hg->id_data_size);
1268					ve->ve_host_group.name_size =
1269					    view_entry->ve_hg->id_data_size;
1270				}
1271
1272				if (view_entry->ve_tg->id_data[0] == '*') {
1273					ve->ve_all_targets = 1;
1274				} else {
1275					bcopy(view_entry->ve_tg->id_data,
1276					    ve->ve_target_group.name,
1277					    view_entry->ve_tg->id_data_size);
1278					ve->ve_target_group.name_size =
1279					    view_entry->ve_tg->id_data_size;
1280				}
1281				ve++;
1282				iocd->stmf_obuf_nentries++;
1283			}
1284			break;
1285		}
1286		mutex_exit(&stmf_state.stmf_lock);
1287		break;
1288
1289	case STMF_IOCTL_LOAD_PP_DATA:
1290		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1291			ret = EACCES;
1292			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1293			break;
1294		}
1295		ppi = (stmf_ppioctl_data_t *)ibuf;
1296		if ((ppi == NULL) ||
1297		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1298			ret = EINVAL;
1299			break;
1300		}
1301		/* returned token */
1302		ppi_token = (uint64_t *)obuf;
1303		if ((ppi_token == NULL) ||
1304		    (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1305			ret = EINVAL;
1306			break;
1307		}
1308		ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1309		break;
1310
1311	case STMF_IOCTL_GET_PP_DATA:
1312		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1313			ret = EACCES;
1314			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1315			break;
1316		}
1317		ppi = (stmf_ppioctl_data_t *)ibuf;
1318		if (ppi == NULL ||
1319		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1320			ret = EINVAL;
1321			break;
1322		}
1323		ppi_out = (stmf_ppioctl_data_t *)obuf;
1324		if ((ppi_out == NULL) ||
1325		    (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1326			ret = EINVAL;
1327			break;
1328		}
1329		ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1330		break;
1331
1332	case STMF_IOCTL_CLEAR_PP_DATA:
1333		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1334			ret = EACCES;
1335			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1336			break;
1337		}
1338		ppi = (stmf_ppioctl_data_t *)ibuf;
1339		if ((ppi == NULL) ||
1340		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1341			ret = EINVAL;
1342			break;
1343		}
1344		ret = stmf_delete_ppd_ioctl(ppi);
1345		break;
1346
1347	case STMF_IOCTL_CLEAR_TRACE:
1348		stmf_trace_clear();
1349		break;
1350
1351	case STMF_IOCTL_ADD_TRACE:
1352		if (iocd->stmf_ibuf_size && ibuf) {
1353			((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = '\0';
1354			stmf_trace("\nstradm", "%s\n", ibuf);
1355		}
1356		break;
1357
1358	case STMF_IOCTL_GET_TRACE_POSITION:
1359		if (obuf && (iocd->stmf_obuf_size > 3)) {
1360			mutex_enter(&trace_buf_lock);
1361			*((int *)obuf) = trace_buf_curndx;
1362			mutex_exit(&trace_buf_lock);
1363		} else {
1364			ret = EINVAL;
1365		}
1366		break;
1367
1368	case STMF_IOCTL_GET_TRACE:
1369		if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1370			ret = EINVAL;
1371			break;
1372		}
1373		i = *((int *)ibuf);
1374		if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1375		    trace_buf_size)) {
1376			ret = EINVAL;
1377			break;
1378		}
1379		mutex_enter(&trace_buf_lock);
1380		bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1381		mutex_exit(&trace_buf_lock);
1382		break;
1383
1384	default:
1385		ret = ENOTTY;
1386	}
1387
1388	if (ret == 0) {
1389		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1390	} else if (iocd->stmf_error) {
1391		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1392	}
1393	if (obuf) {
1394		kmem_free(obuf, iocd->stmf_obuf_size);
1395		obuf = NULL;
1396	}
1397	if (ibuf) {
1398		kmem_free(ibuf, iocd->stmf_ibuf_size);
1399		ibuf = NULL;
1400	}
1401	kmem_free(iocd, sizeof (stmf_iocdata_t));
1402	return (ret);
1403}
1404
1405static int
1406stmf_get_service_state()
1407{
1408	stmf_i_local_port_t *ilport;
1409	stmf_i_lu_t *ilu;
1410	int online = 0;
1411	int offline = 0;
1412	int onlining = 0;
1413	int offlining = 0;
1414
1415	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1416	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1417	    ilport = ilport->ilport_next) {
1418		if (ilport->ilport_state == STMF_STATE_OFFLINE)
1419			offline++;
1420		else if (ilport->ilport_state == STMF_STATE_ONLINE)
1421			online++;
1422		else if (ilport->ilport_state == STMF_STATE_ONLINING)
1423			onlining++;
1424		else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1425			offlining++;
1426	}
1427
1428	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1429	    ilu = ilu->ilu_next) {
1430		if (ilu->ilu_state == STMF_STATE_OFFLINE)
1431			offline++;
1432		else if (ilu->ilu_state == STMF_STATE_ONLINE)
1433			online++;
1434		else if (ilu->ilu_state == STMF_STATE_ONLINING)
1435			onlining++;
1436		else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1437			offlining++;
1438	}
1439
1440	if (stmf_state.stmf_service_running) {
1441		if (onlining)
1442			return (STMF_STATE_ONLINING);
1443		else
1444			return (STMF_STATE_ONLINE);
1445	}
1446
1447	if (offlining) {
1448		return (STMF_STATE_OFFLINING);
1449	}
1450
1451	return (STMF_STATE_OFFLINE);
1452}
1453
/*
 * Drive a user-requested service state transition (ONLINE or OFFLINE),
 * optionally combined with a config-state change (STMF_CONFIG_INIT /
 * STMF_CONFIG_INIT_DONE).  Requires the device to be exclusively opened.
 * Onlines/offlines every registered local port and LU via stmf_ctl(),
 * holding stmf_inventory_locked (not stmf_lock) across those calls.
 * Returns 0 on success or EACCES/EBUSY/EINVAL.
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	/* All transitions done here are attributed to a user request. */
	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* State changes are only allowed through an exclusive open. */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* Another thread is already walking/changing the inventory. */
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	/* Only the two stable target states may be requested. */
	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	/* Refuse to act while a previous transition is still in flight. */
	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		/*
		 * CONFIG_INIT: wipe persistent provider data and the view
		 * configuration; only valid while staying offline.
		 */
		if (std->config_state == STMF_CONFIG_INIT) {
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/*
		 * From INIT (or NONE) the only forward step is INIT_DONE;
		 * anything else is rejected.
		 */
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		/* Already offline and staying offline: nothing to do. */
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/* Cannot go online before configuration init completes. */
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Going online: pin the inventory, drop stmf_lock (stmf_ctl
		 * must not be called with it held), and online each object
		 * whose default state is ONLINE.
		 */
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (stmf_state.stmf_default_lport_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (stmf_state.stmf_default_lu_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	/* While online, only a plain OFFLINE request is acceptable. */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	/* Offline every currently-online port and LU, lock dropped. */
	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
1574
1575static int
1576stmf_get_stmf_state(stmf_state_desc_t *std)
1577{
1578	mutex_enter(&stmf_state.stmf_lock);
1579	std->state = stmf_get_service_state();
1580	std->config_state = stmf_state.stmf_config_state;
1581	mutex_exit(&stmf_state.stmf_lock);
1582
1583	return (0);
1584}
1585
1586/*
1587 * handles registration message from pppt for a logical unit
1588 */
1589stmf_status_t
1590stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1591{
1592	stmf_i_lu_provider_t	*ilp;
1593	stmf_lu_provider_t	*lp;
1594	mutex_enter(&stmf_state.stmf_lock);
1595	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1596		if (strcmp(msg->icrl_lu_provider_name,
1597		    ilp->ilp_lp->lp_name) == 0) {
1598			lp = ilp->ilp_lp;
1599			mutex_exit(&stmf_state.stmf_lock);
1600			lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1601			    msg->icrl_cb_arg_len, type);
1602			return (STMF_SUCCESS);
1603		}
1604	}
1605	mutex_exit(&stmf_state.stmf_lock);
1606	return (STMF_SUCCESS);
1607}
1608
1609/*
1610 * handles de-registration message from pppt for a logical unit
1611 */
1612stmf_status_t
1613stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1614{
1615	stmf_i_lu_provider_t	*ilp;
1616	stmf_lu_provider_t	*lp;
1617	mutex_enter(&stmf_state.stmf_lock);
1618	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1619		if (strcmp(msg->icrl_lu_provider_name,
1620		    ilp->ilp_lp->lp_name) == 0) {
1621			lp = ilp->ilp_lp;
1622			mutex_exit(&stmf_state.stmf_lock);
1623			lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1624			    STMF_MSG_LU_DEREGISTER);
1625			return (STMF_SUCCESS);
1626		}
1627	}
1628	mutex_exit(&stmf_state.stmf_lock);
1629	return (STMF_SUCCESS);
1630}
1631
1632/*
1633 * helper function to find a task that matches a task_msgid
1634 */
1635scsi_task_t *
1636find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1637{
1638	stmf_i_lu_t *ilu;
1639	stmf_i_scsi_task_t *itask;
1640
1641	mutex_enter(&stmf_state.stmf_lock);
1642	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1643		if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1644			break;
1645		}
1646	}
1647
1648	if (ilu == NULL) {
1649		mutex_exit(&stmf_state.stmf_lock);
1650		return (NULL);
1651	}
1652
1653	mutex_enter(&ilu->ilu_task_lock);
1654	for (itask = ilu->ilu_tasks; itask != NULL;
1655	    itask = itask->itask_lu_next) {
1656		mutex_enter(&itask->itask_mutex);
1657		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1658		    ITASK_BEING_ABORTED)) {
1659			mutex_exit(&itask->itask_mutex);
1660			continue;
1661		}
1662		mutex_exit(&itask->itask_mutex);
1663		if (itask->itask_proxy_msg_id == task_msgid) {
1664			break;
1665		}
1666	}
1667	mutex_exit(&ilu->ilu_task_lock);
1668	mutex_exit(&stmf_state.stmf_lock);
1669
1670	if (itask != NULL) {
1671		return (itask->itask_task);
1672	} else {
1673		/* task not found. Likely already aborted. */
1674		return (NULL);
1675	}
1676}
1677
1678/*
1679 * message received from pppt/ic
1680 */
1681stmf_status_t
1682stmf_msg_rx(stmf_ic_msg_t *msg)
1683{
1684	mutex_enter(&stmf_state.stmf_lock);
1685	if (stmf_state.stmf_alua_state != 1) {
1686		mutex_exit(&stmf_state.stmf_lock);
1687		cmn_err(CE_WARN, "stmf alua state is disabled");
1688		ic_msg_free(msg);
1689		return (STMF_FAILURE);
1690	}
1691	mutex_exit(&stmf_state.stmf_lock);
1692
1693	switch (msg->icm_msg_type) {
1694		case STMF_ICM_REGISTER_LUN:
1695			(void) stmf_ic_lu_reg(
1696			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1697			    STMF_MSG_LU_REGISTER);
1698			break;
1699		case STMF_ICM_LUN_ACTIVE:
1700			(void) stmf_ic_lu_reg(
1701			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1702			    STMF_MSG_LU_ACTIVE);
1703			break;
1704		case STMF_ICM_DEREGISTER_LUN:
1705			(void) stmf_ic_lu_dereg(
1706			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1707			break;
1708		case STMF_ICM_SCSI_DATA:
1709			(void) stmf_ic_rx_scsi_data(
1710			    (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1711			break;
1712		case STMF_ICM_SCSI_STATUS:
1713			(void) stmf_ic_rx_scsi_status(
1714			    (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1715			break;
1716		case STMF_ICM_STATUS:
1717			(void) stmf_ic_rx_status(
1718			    (stmf_ic_status_msg_t *)msg->icm_msg);
1719			break;
1720		default:
1721			cmn_err(CE_WARN, "unknown message received %d",
1722			    msg->icm_msg_type);
1723			ic_msg_free(msg);
1724			return (STMF_FAILURE);
1725	}
1726	ic_msg_free(msg);
1727	return (STMF_SUCCESS);
1728}
1729
1730stmf_status_t
1731stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1732{
1733	stmf_i_local_port_t *ilport;
1734
1735	if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1736		/* for now, ignore other message status */
1737		return (STMF_SUCCESS);
1738	}
1739
1740	if (msg->ics_status != STMF_SUCCESS) {
1741		return (STMF_SUCCESS);
1742	}
1743
1744	mutex_enter(&stmf_state.stmf_lock);
1745	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1746	    ilport = ilport->ilport_next) {
1747		if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1748			ilport->ilport_proxy_registered = 1;
1749			break;
1750		}
1751	}
1752	mutex_exit(&stmf_state.stmf_lock);
1753	return (STMF_SUCCESS);
1754}
1755
1756/*
1757 * handles scsi status message from pppt
1758 */
1759stmf_status_t
1760stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1761{
1762	scsi_task_t *task;
1763
1764	/* is this a task management command */
1765	if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1766		return (STMF_SUCCESS);
1767	}
1768
1769	task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1770
1771	if (task == NULL) {
1772		return (STMF_SUCCESS);
1773	}
1774
1775	task->task_scsi_status = msg->icss_status;
1776	task->task_sense_data = msg->icss_sense;
1777	task->task_sense_length = msg->icss_sense_len;
1778	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1779
1780	return (STMF_SUCCESS);
1781}
1782
1783/*
1784 * handles scsi data message from pppt
1785 */
1786stmf_status_t
1787stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1788{
1789	stmf_i_scsi_task_t *itask;
1790	scsi_task_t *task;
1791	stmf_xfer_data_t *xd = NULL;
1792	stmf_data_buf_t *dbuf;
1793	uint32_t sz, minsz, xd_sz, asz;
1794
1795	/* is this a task management command */
1796	if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1797		return (STMF_SUCCESS);
1798	}
1799
1800	task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1801	if (task == NULL) {
1802		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1803		static uint64_t data_msg_id;
1804		stmf_status_t ic_ret = STMF_FAILURE;
1805		mutex_enter(&stmf_state.stmf_lock);
1806		data_msg_id = stmf_proxy_msg_id++;
1807		mutex_exit(&stmf_state.stmf_lock);
1808		/*
1809		 * send xfer done status to pppt
1810		 * for now, set the session id to 0 as we cannot
1811		 * ascertain it since we cannot find the task
1812		 */
1813		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1814		    msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1815		if (ic_xfer_done_msg) {
1816			ic_ret = ic_tx_msg(ic_xfer_done_msg);
1817			if (ic_ret != STMF_IC_MSG_SUCCESS) {
1818				cmn_err(CE_WARN, "unable to xmit proxy msg");
1819			}
1820		}
1821		return (STMF_FAILURE);
1822	}
1823
1824	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1825	dbuf = itask->itask_proxy_dbuf;
1826
1827	task->task_cmd_xfer_length += msg->icsd_data_len;
1828
1829	if (task->task_additional_flags &
1830	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1831		task->task_expected_xfer_length =
1832		    task->task_cmd_xfer_length;
1833	}
1834
1835	sz = min(task->task_expected_xfer_length,
1836	    task->task_cmd_xfer_length);
1837
1838	xd_sz = msg->icsd_data_len;
1839	asz = xd_sz + sizeof (*xd) - 4;
1840	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1841
1842	if (xd == NULL) {
1843		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1844		    STMF_ALLOC_FAILURE, NULL);
1845		return (STMF_FAILURE);
1846	}
1847
1848	xd->alloc_size = asz;
1849	xd->size_left = xd_sz;
1850	bcopy(msg->icsd_data, xd->buf, xd_sz);
1851
1852	sz = min(sz, xd->size_left);
1853	xd->size_left = sz;
1854	minsz = min(512, sz);
1855
1856	if (dbuf == NULL)
1857		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1858	if (dbuf == NULL) {
1859		kmem_free(xd, xd->alloc_size);
1860		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1861		    STMF_ALLOC_FAILURE, NULL);
1862		return (STMF_FAILURE);
1863	}
1864	dbuf->db_lu_private = xd;
1865	dbuf->db_relative_offset = task->task_nbytes_transferred;
1866	stmf_xd_to_dbuf(dbuf, 0);
1867
1868	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1869	(void) stmf_xfer_data(task, dbuf, 0);
1870	return (STMF_SUCCESS);
1871}
1872
/*
 * Forward a SCSI command (and optionally its first data buffer) to the
 * ALUA peer via pppt.  Assigns the task a proxy message id from the
 * global counter; for task management functions the MSG_ID_TM_BIT is set
 * so the reply can be recognized as a dup rather than a proxied task.
 * Returns STMF_SUCCESS only if the message was transmitted to the peer.
 */
stmf_status_t
stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	stmf_ic_msg_t *ic_cmd_msg;
	stmf_ic_msg_status_t ic_ret;
	stmf_status_t ret = STMF_FAILURE;

	/* Proxying requires ALUA to be enabled. */
	if (stmf_state.stmf_alua_state != 1) {
		cmn_err(CE_WARN, "stmf alua state is disabled");
		return (STMF_FAILURE);
	}

	/* The local port must have completed proxy registration. */
	if (ilport->ilport_proxy_registered == 0) {
		return (STMF_FAILURE);
	}

	/* Assign a unique proxy message id under the state lock. */
	mutex_enter(&stmf_state.stmf_lock);
	itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
	mutex_exit(&stmf_state.stmf_lock);
	itask->itask_proxy_dbuf = dbuf;

	/*
	 * stmf will now take over the task handling for this task
	 * but it still needs to be treated differently from other
	 * default handled tasks, hence the ITASK_PROXY_TASK.
	 * If this is a task management function, we're really just
	 * duping the command to the peer. Set the TM bit so that
	 * we can recognize this on return since we won't be completing
	 * the proxied task in that case.
	 */
	mutex_enter(&itask->itask_mutex);
	if (task->task_mgmt_function) {
		itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
	} else {
		/* Don't proxy a task that is already being aborted. */
		if (itask->itask_flags & ITASK_BEING_ABORTED) {
			mutex_exit(&itask->itask_mutex);
			return (STMF_FAILURE);
		}
		itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
	}
	/* Include the first dbuf's data in the message when present. */
	if (dbuf) {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
		    itask->itask_proxy_msg_id);
	} else {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, 0, NULL, itask->itask_proxy_msg_id);
	}
	mutex_exit(&itask->itask_mutex);
	if (ic_cmd_msg) {
		ic_ret = ic_tx_msg(ic_cmd_msg);
		if (ic_ret == STMF_IC_MSG_SUCCESS) {
			ret = STMF_SUCCESS;
		}
	}
	return (ret);
}
1934
1935
/*
 * Load the pppt driver module and resolve the inter-connect (ic) entry
 * points used for ALUA proxying.  Each "x == NULL && (x = ...) == NULL"
 * guard makes the function idempotent: already-resolved symbols are kept
 * and only missing ones are looked up.  Returns STMF_FAILURE on the first
 * load/lookup failure, STMF_SUCCESS once everything is resolved.
 */
stmf_status_t
pppt_modload()
{
	int error;

	/* Open the pppt module if it has not been opened yet. */
	if (pppt_mod == NULL && ((pppt_mod =
	    ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
		cmn_err(CE_WARN, "Unable to load pppt");
		return (STMF_FAILURE);
	}

	/* Resolve each ic message-allocation/transmit symbol on demand. */
	if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
	    (stmf_ic_reg_port_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
		return (STMF_FAILURE);
	}


	if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
	    (stmf_ic_dereg_port_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
	    (stmf_ic_reg_lun_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
	    (stmf_ic_lun_active_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
	    (stmf_ic_dereg_lun_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
	    (stmf_ic_scsi_cmd_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
	    ((ic_scsi_data_xfer_done_msg_alloc =
	    (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol -"
		    "stmf_ic_scsi_data_xfer_done_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_session_reg_msg_alloc == NULL &&
	    ((ic_session_reg_msg_alloc =
	    (stmf_ic_session_create_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol -"
		    "stmf_ic_session_create_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_session_dereg_msg_alloc == NULL &&
	    ((ic_session_dereg_msg_alloc =
	    (stmf_ic_session_destroy_msg_alloc_func_t)
	    ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
	    &error)) == NULL)) {
		cmn_err(CE_WARN,
		    "Unable to find symbol -"
		    "stmf_ic_session_destroy_msg_alloc");
		return (STMF_FAILURE);
	}

	if (ic_tx_msg == NULL && ((ic_tx_msg =
	    (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
	    &error)) == NULL)) {
		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
		return (STMF_FAILURE);
	}

	if (ic_msg_free == NULL && ((ic_msg_free =
	    (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
	    &error)) == NULL)) {
		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
		return (STMF_FAILURE);
	}
	return (STMF_SUCCESS);
}
2050
2051static void
2052stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2053{
2054	mutex_enter(&stmf_state.stmf_lock);
2055	alua_state->alua_node = stmf_state.stmf_alua_node;
2056	alua_state->alua_state = stmf_state.stmf_alua_state;
2057	mutex_exit(&stmf_state.stmf_lock);
2058}
2059
2060
/*
 * Enable or disable ALUA (proxy) mode for this node.
 *
 * alua_state->alua_state: 1 to enable, 0 to disable (any other value
 *	is rejected with EINVAL).
 * alua_state->alua_node:  node id, 0 or 1.
 *
 * Enabling loads the pppt proxy module, then walks every registered
 * local port and every active logical unit, sending a proxy
 * registration message for each so the peer node learns about them.
 * Disabling only clears the state flag.
 *
 * Returns 0 on success, EINVAL on bad arguments, EIO if the proxy
 * module could not be loaded.
 */
static int
stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_lu_t *lu;
	stmf_ic_msg_status_t ic_ret;
	stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
	stmf_local_port_t *lport;
	int ret = 0;

	/* state and node are both single-bit quantities */
	if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (alua_state->alua_state == 1) {
		if (pppt_modload() == STMF_FAILURE) {
			ret = EIO;
			goto err;
		}
		if (alua_state->alua_node != 0) {
			/* reset existing rtpids to new base */
			stmf_rtpid_counter = 255;
		}
		stmf_state.stmf_alua_node = alua_state->alua_node;
		stmf_state.stmf_alua_state = 1;
		/* register existing local ports with ppp */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* skip standby ports and non-alua participants */
			if (ilport->ilport_standby == 1 ||
			    ilport->ilport_alua == 0) {
				continue;
			}
			/* node 1 gets relative target port ids above 255 */
			if (alua_state->alua_node != 0) {
				ilport->ilport_rtpid =
				    atomic_inc_16_nv(&stmf_rtpid_counter);
			}
			lport = ilport->ilport_lport;
			ic_reg_port = ic_reg_port_msg_alloc(
			    lport->lport_id, ilport->ilport_rtpid,
			    0, NULL, stmf_proxy_msg_id);
			if (ic_reg_port) {
				ic_ret = ic_tx_msg(ic_reg_port);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/*
					 * remember the msg id so the ack
					 * can be matched to this port
					 */
					ilport->ilport_reg_msgid =
					    stmf_proxy_msg_id++;
				} else {
					cmn_err(CE_WARN,
					    "error on port registration "
					    "port - %s",
					    ilport->ilport_kstat_tgt_name);
				}
			}
		}
		/* register existing logical units */
		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			/* only active LUs are advertised to the peer */
			if (ilu->ilu_access != STMF_LU_ACTIVE) {
				continue;
			}
			/* register with proxy module */
			lu = ilu->ilu_lu;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_reg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}
	} else {
		stmf_state.stmf_alua_state = 0;
	}

err:
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
2151
2152
/*
 * Framework-private prefix kept alongside every stmf_alloc()ed object.
 * NOTE(review): the double-underscore names are implementation-reserved
 * identifiers in C; kept as-is since they are referenced throughout
 * this file.
 */
typedef struct {
	void	*bp;	/* back pointer from internal struct to main struct */
	int	alloc_size;	/* total size handed to kmem_alloc/zalloc */
} __istmf_t;

/*
 * Header at the front of every stmf_alloc()ed object; the shared
 * (caller-visible) struct follows, then caller private data, then the
 * framework private __istmf_t (see layout comment in stmf_alloc).
 */
typedef struct {
	__istmf_t	*fp;	/* Framework private */
	void		*cp;	/* Caller private */
	void		*ss;	/* struct specific */
} __stmf_t;

/*
 * Per-struct-id size table used by stmf_alloc(); indexed by
 * stmf_struct_id_t, entry 0 is an unused sentinel.
 */
static struct {
	int shared;	/* size of the caller-visible structure */
	int fw_private;	/* size of the matching framework-private struct */
} stmf_sizes[] = { { 0, 0 },
	{ GET_STRUCT_SIZE(stmf_lu_provider_t),
		GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
		GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
		GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
		GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
		GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
		GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
		GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
		GET_STRUCT_SIZE(__istmf_t) }

};
2186
/*
 * Allocate one of the framework's known structures plus
 * additional_size bytes of caller private data.
 *
 * struct_id:        which stmf_sizes[] entry to allocate (0 and
 *                   out-of-range ids return NULL).
 * additional_size:  caller private bytes, rounded up to a multiple
 *                   of 8 below.
 * flags:            AF_FORCE_NOSLEEP forces KM_NOSLEEP; AF_DONTZERO
 *                   skips zeroing.
 *
 * Returns the shared (caller-visible) structure, or NULL on invalid
 * id or allocation failure.  Free with stmf_free().
 */
void *
stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
{
	int stmf_size;
	int kmem_flag;
	__stmf_t *sh;

	if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
		return (NULL);

	/* never sleep for memory in interrupt context */
	if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
		kmem_flag = KM_NOSLEEP;
	} else {
		kmem_flag = KM_SLEEP;
	}

	/* round caller private size up to an 8-byte multiple */
	additional_size = (additional_size + 7) & (~7);
	stmf_size = stmf_sizes[struct_id].shared +
	    stmf_sizes[struct_id].fw_private + additional_size;

	if (flags & AF_DONTZERO)
		sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
	else
		sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);

	if (sh == NULL)
		return (NULL);

	/*
	 * In principle, the implementation inside stmf_alloc should not
	 * be changed anyway. But the original order of framework private
	 * data and caller private data does not support sglist in the caller
	 * private data.
	 * To work around this, the memory segments of framework private
	 * data and caller private data are re-ordered here.
	 * A better solution is to provide a specific interface to allocate
	 * the sglist, then we will not need this workaround any more.
	 * But before the new interface is available, the memory segment
	 * ordering should be kept as is.
	 *
	 * Resulting layout: [shared struct][caller private][fw private].
	 */
	sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
	sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
	    stmf_sizes[struct_id].shared + additional_size);

	sh->fp->bp = sh;
	/* Just store the total size instead of storing additional size */
	sh->fp->alloc_size = stmf_size;

	return (sh);
}
2237
2238void
2239stmf_free(void *ptr)
2240{
2241	__stmf_t *sh = (__stmf_t *)ptr;
2242
2243	/*
2244	 * So far we dont need any struct specific processing. If such
2245	 * a need ever arises, then store the struct id in the framework
2246	 * private section and get it here as sh->fp->struct_id.
2247	 */
2248	kmem_free(ptr, sh->fp->alloc_size);
2249}
2250
2251/*
2252 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2253 * framework and returns a pointer to framework private data for the lu.
2254 * Returns NULL if the lu was not found.
2255 */
2256stmf_i_lu_t *
2257stmf_lookup_lu(stmf_lu_t *lu)
2258{
2259	stmf_i_lu_t *ilu;
2260	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2261
2262	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2263		if (ilu->ilu_lu == lu)
2264			return (ilu);
2265	}
2266	return (NULL);
2267}
2268
2269/*
2270 * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2271 * with the framework and returns a pointer to framework private data for
2272 * the lport.
2273 * Returns NULL if the lport was not found.
2274 */
2275stmf_i_local_port_t *
2276stmf_lookup_lport(stmf_local_port_t *lport)
2277{
2278	stmf_i_local_port_t *ilport;
2279	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2280
2281	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2282	    ilport = ilport->ilport_next) {
2283		if (ilport->ilport_lport == lport)
2284			return (ilport);
2285	}
2286	return (NULL);
2287}
2288
/*
 * Register a logical unit provider with the framework.
 *
 * Only LPIF revisions 1 and 2 are accepted.  If persistent provider
 * data (ppd) was previously loaded for a provider of this name, it is
 * attached here and the provider's callback (if any) is invoked with
 * STMF_PROVIDER_DATA_UPDATED so it can consume the stored nvlist.
 * The callback runs with stmf_lock dropped; ilp_cb_in_progress keeps
 * stmf_deregister_lu_provider() from removing us meanwhile.
 *
 * Returns STMF_FAILURE for an unsupported LPIF revision, otherwise
 * STMF_SUCCESS.
 */
stmf_status_t
stmf_register_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	/* link at the head of the provider list */
	ilp->ilp_next = stmf_state.stmf_ilplist;
	stmf_state.stmf_ilplist = ilp;
	stmf_state.stmf_nlps++;

	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rlp_bail_out;
	}
	/* cross-link provider and its persisted data */
	ilp->ilp_ppd = ppd;
	ppd->ppd_provider = ilp;
	if (lp->lp_cb == NULL)
		goto rlp_bail_out;
	ilp->ilp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	/* drop the lock across the provider callback */
	mutex_exit(&stmf_state.stmf_lock);
	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_cb_in_progress = 0;

rlp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2331
2332stmf_status_t
2333stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2334{
2335	stmf_i_lu_provider_t	**ppilp;
2336	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2337
2338	mutex_enter(&stmf_state.stmf_lock);
2339	if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2340		mutex_exit(&stmf_state.stmf_lock);
2341		return (STMF_BUSY);
2342	}
2343	for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2344	    ppilp = &((*ppilp)->ilp_next)) {
2345		if (*ppilp == ilp) {
2346			*ppilp = ilp->ilp_next;
2347			stmf_state.stmf_nlps--;
2348			if (ilp->ilp_ppd) {
2349				ilp->ilp_ppd->ppd_provider = NULL;
2350				ilp->ilp_ppd = NULL;
2351			}
2352			mutex_exit(&stmf_state.stmf_lock);
2353			return (STMF_SUCCESS);
2354		}
2355	}
2356	mutex_exit(&stmf_state.stmf_lock);
2357	return (STMF_NOT_FOUND);
2358}
2359
/*
 * Register a port provider with the framework.
 *
 * Only PORTIF revision 1 is accepted.  Mirrors
 * stmf_register_lu_provider(): if persistent provider data (ppd) was
 * previously loaded for this provider name, the data is attached and
 * the provider's callback (if any) is invoked with
 * STMF_PROVIDER_DATA_UPDATED.  The callback runs with stmf_lock
 * dropped; ipp_cb_in_progress blocks concurrent deregistration.
 *
 * Returns STMF_FAILURE for an unsupported PORTIF revision, otherwise
 * STMF_SUCCESS.
 */
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	/* link at the head of the provider list */
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	/* cross-link provider and its persisted data */
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	/* drop the lock across the provider callback */
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2402
2403stmf_status_t
2404stmf_deregister_port_provider(stmf_port_provider_t *pp)
2405{
2406	stmf_i_port_provider_t *ipp =
2407	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
2408	stmf_i_port_provider_t **ppipp;
2409
2410	mutex_enter(&stmf_state.stmf_lock);
2411	if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2412		mutex_exit(&stmf_state.stmf_lock);
2413		return (STMF_BUSY);
2414	}
2415	for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2416	    ppipp = &((*ppipp)->ipp_next)) {
2417		if (*ppipp == ipp) {
2418			*ppipp = ipp->ipp_next;
2419			stmf_state.stmf_npps--;
2420			if (ipp->ipp_ppd) {
2421				ipp->ipp_ppd->ppd_provider = NULL;
2422				ipp->ipp_ppd = NULL;
2423			}
2424			mutex_exit(&stmf_state.stmf_lock);
2425			return (STMF_SUCCESS);
2426		}
2427	}
2428	mutex_exit(&stmf_state.stmf_lock);
2429	return (STMF_NOT_FOUND);
2430}
2431
/*
 * Load (store) persistent provider data delivered by a userland ioctl.
 *
 * ppi:        ioctl payload: provider type flags, name, optional
 *             write token, and an XDR-packed nvlist in ppi_data.
 * ppi_token:  if non-NULL, receives the new write token.
 * err_ret:    extra ioctl error detail (STMF_IOCERR_PPD_UPDATED when
 *             the caller's token is stale).
 *
 * Creates a new ppd record if one does not exist for this provider
 * name/type, unpacks the nvlist, replaces any previous data, bumps
 * the token, and if the provider is currently registered invokes its
 * STMF_PROVIDER_DATA_UPDATED callback (with stmf_lock dropped).
 *
 * Returns 0 on success or an errno value.
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t		*ipp;
	stmf_i_lu_provider_t		*ilp;
	stmf_pp_data_t			*ppd;
	nvlist_t			*nv;
	int				s;
	int				ret;

	*err_ret = 0;

	/* exactly one of lu/port provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	/* look for an existing ppd of the same type and name */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Size of the record plus the name tail; the -7
		 * presumably accounts for name bytes already inside
		 * stmf_pp_data_t plus the NUL — verify against the
		 * ppd_name declaration.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	/* unpack the caller-supplied XDR blob into an nvlist */
	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			ilp->ilp_cb_in_progress = 1;
			/* callback runs with stmf_lock dropped */
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			ipp->ipp_cb_in_progress = 1;
			/* callback runs with stmf_lock dropped */
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}
2574
/*
 * Remove one persistent provider data record: detach it from its
 * provider (if registered), unlink it from the global ppd list, free
 * the stored nvlist, and free the record itself.
 * Caller must hold stmf_state.stmf_lock.
 */
void
stmf_delete_ppd(stmf_pp_data_t *ppd)
{
	stmf_pp_data_t **pppd;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	/* break the provider <-> ppd cross-links first */
	if (ppd->ppd_provider) {
		if (ppd->ppd_lu_provider) {
			((stmf_i_lu_provider_t *)
			    ppd->ppd_provider)->ilp_ppd = NULL;
		} else {
			((stmf_i_port_provider_t *)
			    ppd->ppd_provider)->ipp_ppd = NULL;
		}
		ppd->ppd_provider = NULL;
	}

	/* find the link pointer referencing this ppd */
	for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
	    pppd = &((*pppd)->ppd_next)) {
		if (*pppd == ppd)
			break;
	}

	/* not on the list; nothing more to do */
	if (*pppd == NULL)
		return;

	*pppd = ppd->ppd_next;
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);

	kmem_free(ppd, ppd->ppd_alloc_size);
}
2607
2608int
2609stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2610{
2611	stmf_pp_data_t *ppd;
2612	int ret = ENOENT;
2613
2614	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2615		return (EINVAL);
2616	}
2617
2618	mutex_enter(&stmf_state.stmf_lock);
2619
2620	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2621		if (ppi->ppi_lu_provider) {
2622			if (!ppd->ppd_lu_provider)
2623				continue;
2624		} else if (ppi->ppi_port_provider) {
2625			if (!ppd->ppd_port_provider)
2626				continue;
2627		}
2628		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2629			break;
2630	}
2631
2632	if (ppd) {
2633		ret = 0;
2634		stmf_delete_ppd(ppd);
2635	}
2636	mutex_exit(&stmf_state.stmf_lock);
2637
2638	return (ret);
2639}
2640
/*
 * ioctl entry: return the persistent provider data for the named
 * provider, XDR-packed into ppi_out->ppi_data.
 *
 * ppi:      request (provider type flags, name, caller buffer size in
 *           ppi_data_size).
 * ppi_out:  response; receives the current token, required data size,
 *           and the packed nvlist if it fits.
 * err_ret:  set to STMF_IOCERR_INSUFFICIENT_BUF when the caller's
 *           buffer is too small (ppi_out->ppi_data_size then reports
 *           the required size).
 *
 * Returns 0 on success, EINVAL on malformed request or short buffer,
 * ENOENT if no matching record with data exists, or an nvlist errno.
 */
int
stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret)
{
	stmf_pp_data_t *ppd;
	size_t req_size;
	int ret = ENOENT;
	char *bufp = (char *)ppi_out->ppi_data;

	/* exactly one of lu/port provider must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	/* locate the ppd by type and name */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd && ppd->ppd_nv) {
		/* hand back the token so the caller can do safe writes */
		ppi_out->ppi_token = ppd->ppd_token;
		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
		    NV_ENCODE_XDR)) != 0) {
			goto done;
		}
		ppi_out->ppi_data_size = req_size;
		if (req_size > ppi->ppi_data_size) {
			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
			ret = EINVAL;
			goto done;
		}

		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
		    NV_ENCODE_XDR, 0)) != 0) {
			goto done;
		}
		ret = 0;
	}

done:
	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}
2693
2694void
2695stmf_delete_all_ppds()
2696{
2697	stmf_pp_data_t *ppd, *nppd;
2698
2699	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2700	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2701		nppd = ppd->ppd_next;
2702		stmf_delete_ppd(ppd);
2703	}
2704}
2705
2706/*
2707 * 16 is the max string length of a protocol_ident, increase
2708 * the size if needed.
2709 */
2710#define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
2711#define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)
2712#define	STMF_KSTAT_RPORT_DATAMAX	(sizeof (stmf_kstat_rport_info_t) / \
2713					    sizeof (kstat_named_t))
2714
2715/*
2716 * This array matches the Protocol Identifier in stmf_ioctl.h
2717 */
2718#define	MAX_PROTO_STR_LEN	32
2719
2720char *protocol_ident[PROTOCOL_ANY] = {
2721	"Fibre Channel",
2722	"Parallel SCSI",
2723	"SSA",
2724	"IEEE_1394",
2725	"SRP",
2726	"iSCSI",
2727	"SAS",
2728	"ADT",
2729	"ATAPI",
2730	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
2731};
2732
2733/*
2734 * Update the lun wait/run queue count
2735 */
2736static void
2737stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2738{
2739	stmf_i_lu_t		*ilu;
2740	kstat_io_t		*kip;
2741
2742	if (task->task_lu == dlun0)
2743		return;
2744	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2745	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2746		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2747		if (kip != NULL) {
2748			func(kip);
2749		}
2750	}
2751}
2752
2753/*
2754 * Update the target(lport) wait/run queue count
2755 */
2756static void
2757stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2758{
2759	stmf_i_local_port_t	*ilp;
2760	kstat_io_t		*kip;
2761
2762	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2763	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2764		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2765		if (kip != NULL) {
2766			mutex_enter(ilp->ilport_kstat_io->ks_lock);
2767			func(kip);
2768			mutex_exit(ilp->ilport_kstat_io->ks_lock);
2769		}
2770	}
2771}
2772
2773static void
2774stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2775{
2776	stmf_i_local_port_t	*ilp;
2777	kstat_io_t		*kip;
2778
2779	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2780	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2781		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2782		if (kip != NULL) {
2783			mutex_enter(ilp->ilport_kstat_io->ks_lock);
2784			STMF_UPDATE_KSTAT_IO(kip, dbuf);
2785			mutex_exit(ilp->ilport_kstat_io->ks_lock);
2786		}
2787	}
2788}
2789
2790static void
2791stmf_update_kstat_rport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2792{
2793	stmf_i_scsi_session_t	*iss;
2794	stmf_i_remote_port_t	*irport;
2795	kstat_io_t		*kip;
2796
2797	iss = task->task_session->ss_stmf_private;
2798	irport = iss->iss_irport;
2799	if (irport->irport_kstat_io != NULL) {
2800		kip = KSTAT_IO_PTR(irport->irport_kstat_io);
2801		mutex_enter(irport->irport_kstat_io->ks_lock);
2802		STMF_UPDATE_KSTAT_IO(kip, dbuf);
2803		mutex_exit(irport->irport_kstat_io->ks_lock);
2804	}
2805}
2806
/*
 * Account a finishing task into the remote port's extended statistics:
 * per-direction task counts and, when this is the last outstanding
 * task in its direction (n reaches 0), the accumulated read/write
 * latency for the burst.  The running start/done timestamp pair is
 * reset once a direction drains so the next burst starts fresh.
 */
static void
stmf_update_kstat_rport_estat(scsi_task_t *task)
{
	stmf_i_scsi_task_t		*itask;
	stmf_i_scsi_session_t		*iss;
	stmf_i_remote_port_t		*irport;
	stmf_kstat_rport_estat_t	*ks_estat;
	hrtime_t			lat = 0;
	uint32_t			n = 0;

	itask = task->task_stmf_private;
	iss = task->task_session->ss_stmf_private;
	irport = iss->iss_irport;

	/* extended stats kstat is optional */
	if (irport->irport_kstat_estat == NULL)
		return;

	ks_estat = (stmf_kstat_rport_estat_t *)KSTAT_NAMED_PTR(
	    irport->irport_kstat_estat);

	mutex_enter(irport->irport_kstat_estat->ks_lock);

	/* n = tasks still outstanding in this task's direction */
	if (task->task_flags & TF_READ_DATA)
		n = atomic_dec_32_nv(&irport->irport_nread_tasks);
	else if (task->task_flags & TF_WRITE_DATA)
		n = atomic_dec_32_nv(&irport->irport_nwrite_tasks);

	if (itask->itask_read_xfer > 0) {
		ks_estat->i_nread_tasks.value.ui64++;
		lat = stmf_update_rport_timestamps(
		    &irport->irport_rdstart_timestamp,
		    &irport->irport_rddone_timestamp, itask);
		/* charge latency only when the read burst drains */
		if (n == 0)
			ks_estat->i_rport_read_latency.value.ui64 += lat;
	} else if ((itask->itask_write_xfer > 0) ||
	    (task->task_flags & TF_INITIAL_BURST)) {
		ks_estat->i_nwrite_tasks.value.ui64++;
		lat = stmf_update_rport_timestamps(
		    &irport->irport_wrstart_timestamp,
		    &irport->irport_wrdone_timestamp, itask);
		/* charge latency only when the write burst drains */
		if (n == 0)
			ks_estat->i_rport_write_latency.value.ui64 += lat;
	}

	/* direction drained: reset the timestamp window for next burst */
	if (n == 0) {
		if (task->task_flags & TF_READ_DATA) {
			irport->irport_rdstart_timestamp = LLONG_MAX;
			irport->irport_rddone_timestamp = 0;
		} else if (task->task_flags & TF_WRITE_DATA) {
			irport->irport_wrstart_timestamp = LLONG_MAX;
			irport->irport_wrdone_timestamp = 0;
		}
	}

	mutex_exit(irport->irport_kstat_estat->ks_lock);
}
2863
2864static hrtime_t
2865stmf_update_rport_timestamps(hrtime_t *start_tstamp, hrtime_t *done_tstamp,
2866    stmf_i_scsi_task_t *itask)
2867{
2868	*start_tstamp = MIN(*start_tstamp, itask->itask_start_timestamp);
2869	if ((*done_tstamp == 0) &&
2870	    (itask->itask_xfer_done_timestamp == 0)) {
2871		*done_tstamp = *start_tstamp;
2872	} else {
2873		*done_tstamp = MAX(*done_tstamp,
2874		    itask->itask_xfer_done_timestamp);
2875	}
2876
2877	return (*done_tstamp - *start_tstamp);
2878}
2879
2880static void
2881stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2882{
2883	stmf_i_lu_t		*ilu;
2884	kstat_io_t		*kip;
2885
2886	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2887	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2888		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2889		if (kip != NULL) {
2890			mutex_enter(ilu->ilu_kstat_io->ks_lock);
2891			STMF_UPDATE_KSTAT_IO(kip, dbuf);
2892			mutex_exit(ilu->ilu_kstat_io->ks_lock);
2893		}
2894	}
2895}
2896
/*
 * Create the two kstats for a newly registered logical unit: a named
 * "misc" kstat exposing the LU GUID (as ascii hex) and alias, and an
 * "io" kstat for byte/queue statistics.  Failures are logged and the
 * function simply returns — kstats are best effort and never block LU
 * registration.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char				ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t		*ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	/* kstat name is keyed by the ilu's address */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create lu failed");
		return;
	}

	ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
	ilu->ilu_kstat_info->ks_data = ks_lu;

	kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
	    KSTAT_DATA_STRING);

	/* convert guid to hex string */
	int		i;
	uint8_t		*p = ilu->ilu_lu->lu_id->ident;
	bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
	}
	kstat_named_setstr(&ks_lu->i_lun_guid,
	    (const char *)ilu->ilu_ascii_hex_guid);
	kstat_named_setstr(&ks_lu->i_lun_alias,
	    (const char *)ilu->ilu_lu->lu_alias);
	kstat_install(ilu->ilu_kstat_info);

	/* create kstat lun io */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		/* info kstat stays installed; io stats just unavailable */
		cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
		return;
	}
	mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
	ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
	kstat_install(ilu->ilu_kstat_io);
}
2955
2956static void
2957stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2958{
2959	char				ks_nm[KSTAT_STRLEN];
2960	stmf_kstat_tgt_info_t		*ks_tgt;
2961	int				id, len;
2962
2963	/* create kstat lport info */
2964	ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2965	    KM_NOSLEEP);
2966	if (ks_tgt == NULL) {
2967		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2968		return;
2969	}
2970
2971	bzero(ks_nm, sizeof (ks_nm));
2972	(void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2973	if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2974	    0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2975	    sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2976	    KSTAT_FLAG_VIRTUAL)) == NULL) {
2977		kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2978		cmn_err(CE_WARN, "STMF: kstat_create target failed");
2979		return;
2980	}
2981
2982	ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2983	ilport->ilport_kstat_info->ks_data = ks_tgt;
2984
2985	kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2986	    KSTAT_DATA_STRING);
2987	kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2988	    KSTAT_DATA_STRING);
2989	kstat_named_init(&ks_tgt->i_protocol, "protocol",
2990	    KSTAT_DATA_STRING);
2991
2992	/* ident might not be null terminated */
2993	len = ilport->ilport_lport->lport_id->ident_length;
2994	bcopy(ilport->ilport_lport->lport_id->ident,
2995	    ilport->ilport_kstat_tgt_name, len);
2996	ilport->ilport_kstat_tgt_name[len + 1] = '\0';
2997	kstat_named_setstr(&ks_tgt->i_tgt_name,
2998	    (const char *)ilport->ilport_kstat_tgt_name);
2999	kstat_named_setstr(&ks_tgt->i_tgt_alias,
3000	    (const char *)ilport->ilport_lport->lport_alias);
3001	/* protocol */
3002	if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
3003		cmn_err(CE_WARN, "STMF: protocol_id out of bound");
3004		id = PROTOCOL_ANY;
3005	}
3006	kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
3007	kstat_install(ilport->ilport_kstat_info);
3008
3009	/* create kstat lport io */
3010	bzero(ks_nm, sizeof (ks_nm));
3011	(void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
3012	if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
3013	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3014		cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
3015		return;
3016	}
3017	mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
3018	ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
3019	kstat_install(ilport->ilport_kstat_io);
3020}
3021
3022/*
3023 * set the asymmetric access state for a logical unit
3024 * caller is responsible for establishing SCSI unit attention on
3025 * state change
3026 */
/*
 * set the asymmetric access state for a logical unit
 * caller is responsible for establishing SCSI unit attention on
 * state change
 *
 * access_state must be STMF_LU_ACTIVE or STMF_LU_STANDBY.  If the LU
 * is already registered and ALUA mode is on: transitioning to ACTIVE
 * sends a proxy lun-active message to the peer; transitioning to
 * STANDBY aborts all of the LU's outstanding tasks.
 *
 * Returns STMF_SUCCESS, STMF_INVALID_ARG for a bad state, or
 * STMF_BUSY while the LU inventory is locked.
 */
stmf_status_t
stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;

	if ((access_state != STMF_LU_STANDBY) &&
	    (access_state != STMF_LU_ACTIVE)) {
		return (STMF_INVALID_ARG);
	}

	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* locate the registered LU by its 16-byte NAA identifier */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			break;
		}
	}

	if (!ilu) {
		/* not registered yet; just record the state on its private */
		ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	} else {
		/*
		 * We're changing access state on an existing logical unit
		 * Send the proxy registration message for this logical unit
		 * if we're in alua mode.
		 * If the requested state is STMF_LU_ACTIVE, we want to register
		 * this logical unit.
		 * If the requested state is STMF_LU_STANDBY, we're going to
		 * abort all tasks for this logical unit.
		 */
		if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_ACTIVE) {
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_reg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_lun_active_msg_alloc(p1,
				    lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		} else if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_STANDBY) {
			/* abort all tasks for this lu */
			stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
		}
	}

	ilu->ilu_access = access_state;

	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3097
3098
/*
 * Register a logical unit with the STMF framework.
 *
 * The LU's identifier must be a 16-byte NAA descriptor with 0x6 in the
 * high nibble of the first ident byte.  On success the LU is linked
 * into the global LU list, its kstats are created, it is registered
 * with the ALUA proxy when proxying applies, and it is brought online
 * if the default LU state calls for it.
 *
 * Returns:
 *	STMF_INVALID_ARG - malformed LU identifier
 *	STMF_BUSY        - inventory is locked
 *	STMF_ALREADY     - an LU with the same ident is registered
 *	STMF_SUCCESS     - otherwise
 */
stmf_status_t
stmf_register_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;
	stmf_state_change_info_t ssci;
	stmf_id_data_t *luid;

	/* Only a 16-byte NAA (type 6) identifier is acceptable */
	if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
	    (lu->lu_id->ident_length != 16) ||
	    ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
		return (STMF_INVALID_ARG);
	}
	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* Reject duplicate registrations (same 16-byte ident) */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_ALREADY);
		}
	}

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	/* Bind a pre-existing LU id entry (e.g. from a view) to this LU */
	luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
	    lu->lu_id->ident_length, lu->lu_id->ident);
	if (luid) {
		luid->id_pt_to_object = (void *)ilu;
		ilu->ilu_luid = luid;
	}
	ilu->ilu_alias = NULL;

	/* Insert at the head of the global LU list */
	ilu->ilu_next = stmf_state.stmf_ilulist;
	ilu->ilu_prev = NULL;
	if (ilu->ilu_next)
		ilu->ilu_next->ilu_prev = ilu;
	stmf_state.stmf_ilulist = ilu;
	stmf_state.stmf_nlus++;
	if (lu->lu_lp) {
		((stmf_i_lu_provider_t *)
		    (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
	}
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
	STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
	cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL);
	stmf_create_kstat_lu(ilu);
	/*
	 * register with proxy module if available and logical unit
	 * is in active state
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilu->ilu_access == STMF_LU_ACTIVE) {
		stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
		stmf_ic_msg_t *ic_reg_lun;
		if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
		    lu->lu_lp->lp_alua_support) {
			ilu->ilu_alua = 1;
			/* allocate the register message */
			ic_reg_lun = ic_reg_lun_msg_alloc(p1,
			    lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
			    (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
			/* send the message */
			if (ic_reg_lun) {
				ic_ret = ic_tx_msg(ic_reg_lun);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/* only consume the id on success */
					stmf_proxy_msg_id++;
				}
			}
		}
	}
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * check the default state for lu
	 * NOTE(review): stmf_default_lu_state / stmf_service_running are
	 * read after dropping stmf_lock; confirm callers serialize this.
	 */
	if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
		ilu->ilu_prev_state = STMF_STATE_OFFLINE;
	} else {
		ilu->ilu_prev_state = STMF_STATE_ONLINE;
		if (stmf_state.stmf_service_running) {
			ssci.st_rflags = 0;
			ssci.st_additional_info = NULL;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
		}
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}
3191
/*
 * Remove a logical unit from the STMF framework.
 *
 * The LU must already be fully offline (all cached tasks returned to
 * the free list); otherwise STMF_BUSY is returned.  On success every
 * cached task is freed, the LU is de-registered with the ALUA proxy if
 * applicable, and its list linkage, id binding, event handle, and
 * kstats are torn down.
 *
 * Returns STMF_BUSY (inventory locked or LU not offline),
 * STMF_INVALID_ARG (unknown LU), or STMF_SUCCESS.
 */
stmf_status_t
stmf_deregister_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilu = stmf_lookup_lu(lu);
	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_INVALID_ARG);
	}
	if (ilu->ilu_state == STMF_STATE_OFFLINE) {
		/* offline implies every task is back on the free list */
		ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
		/* wait for any thread that is stalling our deregister */
		while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
			cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
		}
		/* free all cached tasks, provider data first */
		if (ilu->ilu_ntasks) {
			stmf_i_scsi_task_t *itask, *nitask;

			nitask = ilu->ilu_tasks;
			do {
				itask = nitask;
				nitask = itask->itask_lu_next;
				lu->lu_task_free(itask->itask_task);
				stmf_free(itask->itask_task);
			} while (nitask != NULL);

			ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
			ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
		}
		/* de-register with proxy if available */
		if (ilu->ilu_access == STMF_LU_ACTIVE &&
		    stmf_state.stmf_alua_state == 1) {
			/* de-register with proxy module */
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_dereg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the de-register message */
				ic_dereg_lun = ic_dereg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name, 0,
				    NULL, stmf_proxy_msg_id);
				/* send the message */
				if (ic_dereg_lun) {
					ic_ret = ic_tx_msg(ic_dereg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						/* id consumed on success */
						stmf_proxy_msg_id++;
					}
				}
			}
		}

		/* unlink from the global LU list */
		if (ilu->ilu_next)
			ilu->ilu_next->ilu_prev = ilu->ilu_prev;
		if (ilu->ilu_prev)
			ilu->ilu_prev->ilu_next = ilu->ilu_next;
		else
			stmf_state.stmf_ilulist = ilu->ilu_next;
		stmf_state.stmf_nlus--;

		/* keep the service thread's scan cursors valid */
		if (ilu == stmf_state.stmf_svc_ilu_draining) {
			stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		}
		if (ilu == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		if (lu->lu_lp) {
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		/* detach the persistent LU id entry from this object */
		if (ilu->ilu_luid) {
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* tear down kstats (still under stmf_lock) */
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data, STMF_KSTAT_LU_SZ);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	cv_destroy(&ilu->ilu_offline_pending_cv);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3289
3290void
3291stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3292{
3293	stmf_i_local_port_t *ilport =
3294	    (stmf_i_local_port_t *)lport->lport_stmf_private;
3295	ilport->ilport_rtpid = rtpid;
3296	ilport->ilport_standby = 1;
3297}
3298
3299void
3300stmf_set_port_alua(stmf_local_port_t *lport)
3301{
3302	stmf_i_local_port_t *ilport =
3303	    (stmf_i_local_port_t *)lport->lport_stmf_private;
3304	ilport->ilport_alua = 1;
3305}
3306
3307stmf_status_t
3308stmf_register_local_port(stmf_local_port_t *lport)
3309{
3310	stmf_i_local_port_t *ilport;
3311	stmf_state_change_info_t ssci;
3312	int start_workers = 0;
3313
3314	mutex_enter(&stmf_state.stmf_lock);
3315	if (stmf_state.stmf_inventory_locked) {
3316		mutex_exit(&stmf_state.stmf_lock);
3317		return (STMF_BUSY);
3318	}
3319	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3320	rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3321
3322	ilport->ilport_instance =
3323	    id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3324	if (ilport->ilport_instance == -1) {
3325		mutex_exit(&stmf_state.stmf_lock);
3326		return (STMF_FAILURE);
3327	}
3328	ilport->ilport_next = stmf_state.stmf_ilportlist;
3329	ilport->ilport_prev = NULL;
3330	if (ilport->ilport_next)
3331		ilport->ilport_next->ilport_prev = ilport;
3332	stmf_state.stmf_ilportlist = ilport;
3333	stmf_state.stmf_nlports++;
3334	if (lport->lport_pp) {
3335		((stmf_i_port_provider_t *)
3336		    (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3337	}
3338	ilport->ilport_tg =
3339	    stmf_lookup_group_for_target(lport->lport_id->ident,
3340	    lport->lport_id->ident_length);
3341
3342	/*
3343	 * rtpid will/must be set if this is a standby port
3344	 * only register ports that are not standby (proxy) ports
3345	 * and ports that are alua participants (ilport_alua == 1)
3346	 */
3347	if (ilport->ilport_standby == 0) {
3348		ilport->ilport_rtpid = atomic_inc_16_nv(&stmf_rtpid_counter);
3349	}
3350
3351	if (stmf_state.stmf_alua_state == 1 &&
3352	    ilport->ilport_standby == 0 &&
3353	    ilport->ilport_alua == 1) {
3354		stmf_ic_msg_t *ic_reg_port;
3355		stmf_ic_msg_status_t ic_ret;
3356		stmf_local_port_t *lport;
3357		lport = ilport->ilport_lport;
3358		ic_reg_port = ic_reg_port_msg_alloc(
3359		    lport->lport_id, ilport->ilport_rtpid,
3360		    0, NULL, stmf_proxy_msg_id);
3361		if (ic_reg_port) {
3362			ic_ret = ic_tx_msg(ic_reg_port);
3363			if (ic_ret == STMF_IC_MSG_SUCCESS) {
3364				ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3365			} else {
3366				cmn_err(CE_WARN, "error on port registration "
3367				"port - %s", ilport->ilport_kstat_tgt_name);
3368			}
3369		}
3370	}
3371	STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3372	stmf_create_kstat_lport(ilport);
3373	if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3374		stmf_workers_state = STMF_WORKERS_ENABLING;
3375		start_workers = 1;
3376	}
3377	mutex_exit(&stmf_state.stmf_lock);
3378
3379	if (start_workers)
3380		stmf_worker_init();
3381
3382	/*  the default state of LPORT */
3383
3384	if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3385		ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3386	} else {
3387		ilport->ilport_prev_state = STMF_STATE_ONLINE;
3388		if (stmf_state.stmf_service_running) {
3389			ssci.st_rflags = 0;
3390			ssci.st_additional_info = NULL;
3391			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3392		}
3393	}
3394
3395	/* XXX: Generate event */
3396	return (STMF_SUCCESS);
3397}
3398
/*
 * Remove a local port from the STMF framework.
 *
 * Any queued service-thread requests for the port are discarded first.
 * The port is de-registered with the ALUA proxy if applicable, and it
 * may only be removed once all of its sessions are gone; otherwise
 * STMF_BUSY is returned (with the proxy dereg already sent).
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* dequeue all object requests from active queue */
	stmf_svc_kill_obj_requests(lport);

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* id consumed only on successful send */
				stmf_proxy_msg_id++;
			}
		}
	}

	/* removal is only legal once every session is deregistered */
	if (ilport->ilport_nsessions == 0) {
		/* unlink from the global port list */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		id_free(stmf_state.stmf_ilport_inst_space,
		    ilport->ilport_instance);
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* tear down kstats (still under stmf_lock) */
	if (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    STMF_KSTAT_TGT_SZ);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3466
3467/*
3468 * Rport id/instance mappings remain valid until STMF is unloaded
3469 */
3470static int
3471stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3472{
3473	const	stmf_i_remote_port_t	*irport1 = void_irport1;
3474	const	stmf_i_remote_port_t	*irport2 = void_irport2;
3475	int			result;
3476
3477	/* Sort by code set then ident */
3478	if (irport1->irport_id->code_set <
3479	    irport2->irport_id->code_set) {
3480		return (-1);
3481	} else if (irport1->irport_id->code_set >
3482	    irport2->irport_id->code_set) {
3483		return (1);
3484	}
3485
3486	/* Next by ident length */
3487	if (irport1->irport_id->ident_length <
3488	    irport2->irport_id->ident_length) {
3489		return (-1);
3490	} else if (irport1->irport_id->ident_length >
3491	    irport2->irport_id->ident_length) {
3492		return (1);
3493	}
3494
3495	/* Code set and ident length both match, now compare idents */
3496	result = memcmp(irport1->irport_id->ident,
3497	    irport2->irport_id->ident,
3498	    irport1->irport_id->ident_length);
3499
3500	if (result < 0) {
3501		return (-1);
3502	} else if (result > 0) {
3503		return (1);
3504	}
3505
3506	return (0);
3507}
3508
/*
 * Allocate and initialize a new remote port context for the given
 * devid.  The devid is copied into storage allocated together with
 * the structure (scsi_devid_desc_t carries a variable-length ident,
 * hence the "- 1" on the size).  The new context starts with one
 * reference, held on behalf of the caller.
 *
 * Returns NULL on allocation or instance-id failure.  The caller must
 * hold stmf_state.stmf_lock and is responsible for inserting the
 * result into stmf_state.stmf_irportlist.
 */
static stmf_i_remote_port_t *
stmf_irport_create(scsi_devid_desc_t *rport_devid)
{
	int			alloc_len;
	stmf_i_remote_port_t	*irport;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
	    rport_devid->ident_length - 1;
	irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
	if (irport == NULL) {
		return (NULL);
	}

	irport->irport_instance =
	    id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
	if (irport->irport_instance == -1) {
		kmem_free(irport, alloc_len);
		return (NULL);
	}

	/* devid lives in the tail of the same allocation */
	irport->irport_id =
	    (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
	bcopy(rport_devid, irport->irport_id,
	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
	irport->irport_refcnt = 1;
	mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * NOTE(review): LLONG_MAX appears to be the "no I/O in flight"
	 * sentinel for the latency timestamps — confirm against the
	 * estat update code.
	 */
	irport->irport_rdstart_timestamp = LLONG_MAX;
	irport->irport_wrstart_timestamp = LLONG_MAX;

	return (irport);
}
3546
3547static void
3548stmf_irport_destroy(stmf_i_remote_port_t *irport)
3549{
3550	stmf_destroy_kstat_rport(irport);
3551	id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3552	mutex_destroy(&irport->irport_mutex);
3553	kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3554	    irport->irport_id->ident_length - 1);
3555}
3556
3557static void
3558stmf_create_kstat_rport(stmf_i_remote_port_t *irport)
3559{
3560	scsi_devid_desc_t *id = irport->irport_id;
3561	char ks_nm[KSTAT_STRLEN];
3562	stmf_kstat_rport_info_t *ks_info;
3563	stmf_kstat_rport_estat_t *ks_estat;
3564	char *ident = NULL;
3565
3566	ks_info = kmem_zalloc(sizeof (*ks_info), KM_NOSLEEP);
3567	if (ks_info == NULL)
3568		goto err_out;
3569
3570	(void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_%"PRIxPTR"",
3571	    (uintptr_t)irport);
3572	irport->irport_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
3573	    ks_nm, "misc", KSTAT_TYPE_NAMED,
3574	    STMF_KSTAT_RPORT_DATAMAX - STMF_RPORT_INFO_LIMIT,
3575	    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE);
3576	if (irport->irport_kstat_info == NULL) {
3577		kmem_free(ks_info, sizeof (*ks_info));
3578		goto err_out;
3579	}
3580
3581	irport->irport_kstat_info->ks_data = ks_info;
3582	irport->irport_kstat_info->ks_private = irport;
3583	irport->irport_kstat_info->ks_update = stmf_kstat_rport_update;
3584	ident = kmem_alloc(id->ident_length + 1, KM_NOSLEEP);
3585	if (ident == NULL) {
3586		kstat_delete(irport->irport_kstat_info);
3587		irport->irport_kstat_info = NULL;
3588		kmem_free(ks_info, sizeof (*ks_info));
3589		goto err_out;
3590	}
3591
3592	(void) memcpy(ident, id->ident, id->ident_length);
3593	ident[id->ident_length] = '\0';
3594	kstat_named_init(&ks_info->i_rport_name, "name", KSTAT_DATA_STRING);
3595	kstat_named_init(&ks_info->i_protocol, "protocol",
3596	    KSTAT_DATA_STRING);
3597
3598	kstat_named_setstr(&ks_info->i_rport_name, ident);
3599	kstat_named_setstr(&ks_info->i_protocol,
3600	    protocol_ident[irport->irport_id->protocol_id]);
3601	irport->irport_kstat_info->ks_lock = &irport->irport_mutex;
3602	irport->irport_info_dirty = B_TRUE;
3603	kstat_install(irport->irport_kstat_info);
3604
3605	(void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_io_%"PRIxPTR"",
3606	    (uintptr_t)irport);
3607	irport->irport_kstat_io = kstat_create(STMF_MODULE_NAME, 0, ks_nm,
3608	    "io", KSTAT_TYPE_IO, 1, 0);
3609	if (irport->irport_kstat_io == NULL)
3610		goto err_out;
3611
3612	irport->irport_kstat_io->ks_lock = &irport->irport_mutex;
3613	kstat_install(irport->irport_kstat_io);
3614
3615	(void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_st_%"PRIxPTR"",
3616	    (uintptr_t)irport);
3617	irport->irport_kstat_estat = kstat_create(STMF_MODULE_NAME, 0, ks_nm,
3618	    "misc", KSTAT_TYPE_NAMED,
3619	    sizeof (*ks_estat) / sizeof (kstat_named_t), 0);
3620	if (irport->irport_kstat_estat == NULL)
3621		goto err_out;
3622
3623	ks_estat = (stmf_kstat_rport_estat_t *)KSTAT_NAMED_PTR(
3624	    irport->irport_kstat_estat);
3625	kstat_named_init(&ks_estat->i_rport_read_latency,
3626	    "rlatency", KSTAT_DATA_UINT64);
3627	kstat_named_init(&ks_estat->i_rport_write_latency,
3628	    "wlatency", KSTAT_DATA_UINT64);
3629	kstat_named_init(&ks_estat->i_nread_tasks, "rntasks",
3630	    KSTAT_DATA_UINT64);
3631	kstat_named_init(&ks_estat->i_nwrite_tasks, "wntasks",
3632	    KSTAT_DATA_UINT64);
3633	irport->irport_kstat_estat->ks_lock = &irport->irport_mutex;
3634	kstat_install(irport->irport_kstat_estat);
3635
3636	return;
3637
3638err_out:
3639	(void) memcpy(ks_nm, id->ident, MAX(KSTAT_STRLEN - 1,
3640	    id->ident_length));
3641	ks_nm[id->ident_length] = '\0';
3642	cmn_err(CE_WARN, "STMF: remote port kstat creation failed: %s", ks_nm);
3643}
3644
3645static void
3646stmf_destroy_kstat_rport(stmf_i_remote_port_t *irport)
3647{
3648	if (irport->irport_kstat_io != NULL) {
3649		kstat_delete(irport->irport_kstat_io);
3650	}
3651	if (irport->irport_kstat_estat != NULL) {
3652		kstat_delete(irport->irport_kstat_estat);
3653	}
3654	if (irport->irport_kstat_info != NULL) {
3655		stmf_kstat_rport_info_t *ks_info;
3656		kstat_named_t *knp;
3657		void *ptr;
3658		int i;
3659
3660		ks_info = (stmf_kstat_rport_info_t *)KSTAT_NAMED_PTR(
3661		    irport->irport_kstat_info);
3662		kstat_delete(irport->irport_kstat_info);
3663		ptr = KSTAT_NAMED_STR_PTR(&ks_info->i_rport_name);
3664		kmem_free(ptr, KSTAT_NAMED_STR_BUFLEN(&ks_info->i_rport_name));
3665
3666		for (i = 0, knp = ks_info->i_rport_uinfo;
3667		    i < STMF_RPORT_INFO_LIMIT; i++, knp++) {
3668			ptr = KSTAT_NAMED_STR_PTR(knp);
3669			if (ptr != NULL)
3670			kmem_free(ptr, KSTAT_NAMED_STR_BUFLEN(knp));
3671		}
3672		kmem_free(ks_info, sizeof (*ks_info));
3673	}
3674}
3675
3676static stmf_i_remote_port_t *
3677stmf_irport_register(scsi_devid_desc_t *rport_devid)
3678{
3679	stmf_i_remote_port_t	*irport;
3680
3681	mutex_enter(&stmf_state.stmf_lock);
3682
3683	/*
3684	 * Lookup will bump the refcnt if there's an existing rport
3685	 * context for this identifier.
3686	 */
3687	if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3688		mutex_exit(&stmf_state.stmf_lock);
3689		return (irport);
3690	}
3691
3692	irport = stmf_irport_create(rport_devid);
3693	if (irport == NULL) {
3694		mutex_exit(&stmf_state.stmf_lock);
3695		return (NULL);
3696	}
3697
3698	stmf_create_kstat_rport(irport);
3699	avl_add(&stmf_state.stmf_irportlist, irport);
3700	mutex_exit(&stmf_state.stmf_lock);
3701
3702	return (irport);
3703}
3704
3705static stmf_i_remote_port_t *
3706stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3707{
3708	stmf_i_remote_port_t	*irport;
3709	stmf_i_remote_port_t	tmp_irport;
3710
3711	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3712	tmp_irport.irport_id = rport_devid;
3713	irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3714	if (irport != NULL) {
3715		mutex_enter(&irport->irport_mutex);
3716		irport->irport_refcnt++;
3717		mutex_exit(&irport->irport_mutex);
3718	}
3719
3720	return (irport);
3721}
3722
3723static void
3724stmf_irport_deregister(stmf_i_remote_port_t *irport)
3725{
3726	/*
3727	 * If we were actually going to remove unreferenced remote ports
3728	 * we would want to acquire stmf_state.stmf_lock before getting
3729	 * the irport mutex.
3730	 *
3731	 * Instead we're just going to leave it there even if unreferenced.
3732	 */
3733	mutex_enter(&irport->irport_mutex);
3734	irport->irport_refcnt--;
3735	mutex_exit(&irport->irport_mutex);
3736}
3737
3738/*
3739 * Port provider has to make sure that register/deregister session and
3740 * port are serialized calls.
3741 */
/*
 * Register a new SCSI session on a local port.
 *
 * Binds the session to a (possibly shared) remote port context,
 * builds the session's LUN map, and links it onto the port's session
 * list.  Returns STMF_FAILURE when the port is not online, the remote
 * port cannot be registered, or the supplied/derived transport id is
 * invalid; STMF_SUCCESS otherwise.
 */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	uint8_t		lun[8];

	/*
	 * Port state has to be online to register a scsi session. It is
	 * possible that we started an offline operation and a new SCSI
	 * session started at the same time (in that case also we are going
	 * to fail the registeration). But any other state is simply
	 * a bad port provider implementation.
	 */
	if (ilport->ilport_state != STMF_STATE_ONLINE) {
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			stmf_trace(lport->lport_alias, "Port is trying to "
			    "register a session while the state is neither "
			    "online nor offlining");
		}
		return (STMF_FAILURE);
	}
	bzero(lun, 8);
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* take a reference on (or create) the remote port context */
	if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
		stmf_trace(lport->lport_alias, "Could not register "
		    "remote port during session registration");
		return (STMF_FAILURE);
	}

	iss->iss_flags |= ISS_BEING_CREATED;

	/*
	 * If the provider did not supply a remote port transport id,
	 * synthesize one from the devid; ISS_NULL_TPTID records that we
	 * own (and must later free) ss_rport.
	 */
	if (ss->ss_rport == NULL) {
		iss->iss_flags |= ISS_NULL_TPTID;
		ss->ss_rport = stmf_scsilib_devid_to_remote_port(
		    ss->ss_rport_id);
		if (ss->ss_rport == NULL) {
			iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
			stmf_trace(lport->lport_alias, "Device id to "
			    "remote port conversion failed");
			return (STMF_FAILURE);
		}
	} else {
		if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
		    ss->ss_rport->rport_tptid_sz, NULL)) {
			iss->iss_flags &= ~ISS_BEING_CREATED;
			stmf_trace(lport->lport_alias, "Remote port "
			    "transport id validation failed");
			return (STMF_FAILURE);
		}
	}

	/* sessions use the ilport_lock. No separate lock is required */
	iss->iss_lockp = &ilport->ilport_lock;

	if (iss->iss_sm != NULL)
		cmn_err(CE_PANIC, "create lun map called with non NULL map");
	iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
	    KM_SLEEP);

	/* build the LUN map and publish the session on the port's list */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	(void) stmf_session_create_lun_map(ilport, iss);
	ilport->ilport_nsessions++;
	iss->iss_next = ilport->ilport_ss_list;
	ilport->ilport_ss_list = iss;
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	iss->iss_creation_time = ddi_get_time();
	ss->ss_session_id = atomic_inc_64_nv(&stmf_session_counter);
	iss->iss_flags &= ~ISS_BEING_CREATED;
	/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
	iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
	DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);
	return (STMF_SUCCESS);
}
3821
/*
 * Attach a name/value string property to the session's remote port
 * "info" kstat.  The value is duplicated; the copy is owned by the
 * kstat and freed on removal/destroy.
 *
 * Returns STMF_FAILURE (freeing the copy) when all
 * STMF_KSTAT_RPORT_DATAMAX slots are in use, STMF_SUCCESS otherwise.
 * Adding a property name that already exists is a caller bug (only
 * caught by the ASSERT on DEBUG builds).
 */
stmf_status_t
stmf_add_rport_info(stmf_scsi_session_t *ss,
    const char *prop_name, const char *prop_value)
{
	stmf_i_scsi_session_t *iss = ss->ss_stmf_private;
	stmf_i_remote_port_t *irport = iss->iss_irport;
	kstat_named_t *knp;
	char *s;
	int i;

	/*
	 * NOTE(review): no NULL check — presumably the kernel strdup
	 * sleeps and cannot fail; confirm.
	 */
	s = strdup(prop_value);

	mutex_enter(irport->irport_kstat_info->ks_lock);
	/* Make sure the caller doesn't try to add already existing property */
	knp = KSTAT_NAMED_PTR(irport->irport_kstat_info);
	for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
		/* first slot with no string is the first free slot */
		if (KSTAT_NAMED_STR_PTR(knp) == NULL)
			break;

		ASSERT(strcmp(knp->name, prop_name) != 0);
	}

	if (i == STMF_KSTAT_RPORT_DATAMAX) {
		mutex_exit(irport->irport_kstat_info->ks_lock);
		kmem_free(s, strlen(s) + 1);
		return (STMF_FAILURE);
	}

	/* force stmf_kstat_rport_update() to recompute ndata/data_size */
	irport->irport_info_dirty = B_TRUE;
	kstat_named_init(knp, prop_name, KSTAT_DATA_STRING);
	kstat_named_setstr(knp, s);
	mutex_exit(irport->irport_kstat_info->ks_lock);

	return (STMF_SUCCESS);
}
3857
/*
 * Remove a named string property from the session's remote port
 * "info" kstat and free its value.  Remaining entries are compacted
 * down so the used slots stay contiguous (the add/update code treats
 * the first NULL-string slot as end-of-data).
 */
void
stmf_remove_rport_info(stmf_scsi_session_t *ss,
    const char *prop_name)
{
	stmf_i_scsi_session_t *iss = ss->ss_stmf_private;
	stmf_i_remote_port_t *irport = iss->iss_irport;
	kstat_named_t *knp;
	char *s;
	int i;
	uint32_t len;

	mutex_enter(irport->irport_kstat_info->ks_lock);
	knp = KSTAT_NAMED_PTR(irport->irport_kstat_info);
	for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
		/*
		 * NOTE(review): knp->name is an embedded char array, so
		 * the != NULL test looks always-true; the strcmp does
		 * the real matching — confirm intent.
		 */
		if ((knp->name != NULL) && (strcmp(knp->name, prop_name) == 0))
			break;
	}

	if (i == STMF_KSTAT_RPORT_DATAMAX) {
		mutex_exit(irport->irport_kstat_info->ks_lock);
		return;
	}

	/* remember the victim's string so it can be freed after compaction */
	s = KSTAT_NAMED_STR_PTR(knp);
	len = KSTAT_NAMED_STR_BUFLEN(knp);

	/* shift each subsequent entry (name + string) down one slot */
	for (; i < STMF_KSTAT_RPORT_DATAMAX - 1; i++, knp++) {
		kstat_named_init(knp, knp[1].name, KSTAT_DATA_STRING);
		kstat_named_setstr(knp, KSTAT_NAMED_STR_PTR(&knp[1]));
	}
	/* clear the now-unused last slot */
	kstat_named_init(knp, "", KSTAT_DATA_STRING);

	/* force stmf_kstat_rport_update() to recompute ndata/data_size */
	irport->irport_info_dirty = B_TRUE;
	mutex_exit(irport->irport_kstat_info->ks_lock);
	kmem_free(s, len);
}
3894
3895static int
3896stmf_kstat_rport_update(kstat_t *ksp, int rw)
3897{
3898	stmf_i_remote_port_t *irport = ksp->ks_private;
3899	kstat_named_t *knp;
3900	uint_t ndata = 0;
3901	size_t dsize = 0;
3902	int i;
3903
3904	if (rw == KSTAT_WRITE)
3905		return (EACCES);
3906
3907	if (!irport->irport_info_dirty)
3908		return (0);
3909
3910	knp = KSTAT_NAMED_PTR(ksp);
3911	for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
3912		if (KSTAT_NAMED_STR_PTR(knp) == NULL)
3913			break;
3914		ndata++;
3915		dsize += KSTAT_NAMED_STR_BUFLEN(knp);
3916	}
3917
3918	ksp->ks_ndata = ndata;
3919	ksp->ks_data_size = sizeof (kstat_named_t) * ndata + dsize;
3920	irport->irport_info_dirty = B_FALSE;
3921
3922	return (0);
3923}
3924
/*
 * Tear down a SCSI session: notify the ALUA proxy when applicable,
 * unlink the session from its port, drop the remote port reference,
 * release the session's LUN map (deregistering ITL handles and LU
 * references), and free a synthesized ss_rport if we created one.
 *
 * Panics if the session is not on the port's list.  Spins (with
 * delay(1)) while an event handler is active on the session.
 */
void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	stmf_status_t ic_ret = STMF_FAILURE;
	stmf_lun_map_t *sm;
	stmf_i_lu_t *ilu;
	uint16_t n;
	stmf_lun_map_ent_t *ent;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	/* retry until no event handler holds the session */
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				stmf_proxy_msg_id++;
			}
		}
	}

	/* unlink the session from the port's singly-linked list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;

	stmf_irport_deregister(iss->iss_irport);
	/*
	 * to avoid conflict with updating session's map,
	 * which only grab stmf_lock
	 */
	sm = iss->iss_sm;
	iss->iss_sm = NULL;
	iss->iss_hg = NULL;

	rw_exit(&ilport->ilport_lock);

	/*
	 * Release the detached LUN map: deregister each entry's ITL
	 * handle, drop the LU reference, and free the entry storage.
	 * (stmf_lock is still held here.)
	 */
	if (sm->lm_nentries) {
		for (n = 0; n < sm->lm_nentries; n++) {
			if ((ent = (stmf_lun_map_ent_t *)sm->lm_plus[n])
			    != NULL) {
				if (ent->ent_itl_datap) {
					stmf_do_itl_dereg(ent->ent_lu,
					    ent->ent_itl_datap,
					    STMF_ITL_REASON_IT_NEXUS_LOSS);
				}
				ilu = (stmf_i_lu_t *)
				    ent->ent_lu->lu_stmf_private;
				atomic_dec_32(&ilu->ilu_ref_cnt);
				kmem_free(sm->lm_plus[n],
				    sizeof (stmf_lun_map_ent_t));
			}
		}
		kmem_free(sm->lm_plus,
		    sizeof (stmf_lun_map_ent_t *) * sm->lm_nentries);
	}
	kmem_free(sm, sizeof (*sm));

	/* free ss_rport only if STMF synthesized it at registration */
	if (iss->iss_flags & ISS_NULL_TPTID) {
		stmf_remote_port_free(ss->ss_rport);
	}

	mutex_exit(&stmf_state.stmf_lock);
}
4024
4025
4026
/*
 * Find the session with the given id by walking every local port's
 * session list.  When stay_locked is set and a match is found, the
 * owning port's ilport_lock is returned held as WRITER (this is the
 * lock sessions publish as iss_lockp) and the caller must drop it.
 * Returns NULL when no session matches.
 */
stmf_i_scsi_session_t *
stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
{
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			if (iss->iss_ss->ss_session_id == session_id) {
				/* keep ilport_lock held for the caller */
				if (!stay_locked)
					rw_exit(&ilport->ilport_lock);
				mutex_exit(&stmf_state.stmf_lock);
				return (iss);
			}
		}
		rw_exit(&ilport->ilport_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (NULL);
}
4051
4052void
4053stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
4054{
4055	stmf_itl_data_t **itlpp;
4056	stmf_i_lu_t *ilu;
4057
4058	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);
4059
4060	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4061	mutex_enter(&ilu->ilu_task_lock);
4062	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
4063	    itlpp = &(*itlpp)->itl_next) {
4064		if ((*itlpp) == itl)
4065			break;
4066	}
4067	ASSERT((*itlpp) != NULL);
4068	*itlpp = itl->itl_next;
4069	mutex_exit(&ilu->ilu_task_lock);
4070	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
4071	    (uint32_t)itl->itl_hdlrm_reason);
4072
4073	kmem_free(itl, sizeof (*itl));
4074}
4075
/*
 * Attach a provider-supplied ITL handle to the (session, LUN) pair.
 *
 * The session is identified either directly (ss != NULL) or via
 * session_id, in which case the lookup returns with the port lock
 * held (stay_locked = 1).  The handle is recorded in the session's
 * LUN map entry and linked onto the LU's ITL list with an initial
 * reference count of 1.
 *
 * Returns STMF_NOT_FOUND (unknown session or LUN not mapped to this
 * LU), STMF_ALREADY (entry already has a handle), STMF_ALLOC_FAILURE,
 * or STMF_SUCCESS.
 */
stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_itl_data_t *itl;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_ent_t *lun_map_ent;
	stmf_i_lu_t *ilu;
	uint16_t n;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	if (ss == NULL) {
		/* stay_locked: returns with iss_lockp held as writer */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	}

	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(iss->iss_lockp, RW_WRITER);
	/* decode a SAM flat-addressing LUN into a 14-bit map index */
	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	lun_map_ent = (stmf_lun_map_ent_t *)
	    stmf_get_ent_from_map(iss->iss_sm, n);
	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_NOT_FOUND);
	}
	if (lun_map_ent->ent_itl_datap != NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALREADY);
	}

	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
	if (itl == NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	itl->itl_ilu = ilu;
	itl->itl_session = iss;
	/* one reference, owned by the LUN map entry */
	itl->itl_counter = 1;
	itl->itl_lun = n;
	itl->itl_handle = itl_handle;

	/* link onto the LU's ITL list under the task lock */
	mutex_enter(&ilu->ilu_task_lock);
	itl->itl_next = ilu->ilu_itl_list;
	ilu->ilu_itl_list = itl;
	mutex_exit(&ilu->ilu_task_lock);
	lun_map_ent->ent_itl_datap = itl;
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
4134
/*
 * Mark an ITL handle as being terminated and drop the caller's reference.
 * If this was the last reference the handle is released immediately;
 * otherwise whoever drops the final reference (see stmf_task_free()) will
 * release it.
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	/*
	 * Atomically set STMF_ITL_BEING_TERMINATED.  If another thread
	 * already set it, that thread owns the teardown; nothing to do.
	 */
	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	/* Recorded for the lu_abort() call in stmf_release_itl_handle(). */
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	/* Drop our reference; a non-zero result means others remain. */
	if (atomic_dec_32_nv(&itl->itl_counter))
		return;

	stmf_release_itl_handle(lu, itl);
}
4155
/*
 * Detach and terminate every ITL handle associated with the given LU
 * across all local ports and sessions.  A scratch array is sized from
 * ilu_ref_cnt; if that count changes before stmf_lock is taken, the whole
 * scan restarts.  Returns STMF_NOT_FOUND when the LU has no mappings.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	/* Size the scratch array before taking any locks (KM_SLEEP). */
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
		return (STMF_NOT_FOUND);
	itl_list = (stmf_itl_data_t **)kmem_zalloc(
	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
	mutex_enter(&stmf_state.stmf_lock);
	if (nmaps != ilu->ilu_ref_cnt) {
		/* Something changed, start all over */
		mutex_exit(&stmf_state.stmf_lock);
		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
		goto dereg_itl_start;
	}
	/*
	 * Walk every port/session LUN map, detaching this LU's ITL
	 * handles into the scratch array.  Each port lock is held only
	 * while that port's sessions are scanned (stmf_lock -> ilport_lock
	 * per the lock-order comment at the top of the file).
	 */
	nu = 0;
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			lm = iss->iss_sm;
			if (!lm)
				continue;
			for (i = 0; i < lm->lm_nentries; i++) {
				if (lm->lm_plus[i] == NULL)
					continue;
				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
				if ((ent->ent_lu == lu) &&
				    (ent->ent_itl_datap)) {
					itl_list[nu++] = ent->ent_itl_datap;
					ent->ent_itl_datap = NULL;
					if (nu == nmaps) {
						rw_exit(&ilport->ilport_lock);
						goto dai_scan_done;
					}
				}
			} /* lun table for a session */
		} /* sessions */
		rw_exit(&ilport->ilport_lock);
	} /* ports */

dai_scan_done:
	mutex_exit(&stmf_state.stmf_lock);

	/* Now terminate the collected handles with no locks held. */
	for (i = 0; i < nu; i++) {
		stmf_do_itl_dereg(lu, itl_list[i],
		    STMF_ITL_REASON_DEREG_REQUEST);
	}
	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

	return (STMF_SUCCESS);
}
4221
4222stmf_data_buf_t *
4223stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4224    uint32_t flags)
4225{
4226	stmf_i_scsi_task_t *itask =
4227	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4228	stmf_local_port_t *lport = task->task_lport;
4229	stmf_data_buf_t *dbuf;
4230	uint8_t ndx;
4231
4232	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4233	if (ndx == 0xff)
4234		return (NULL);
4235	dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4236	    task, size, pminsize, flags);
4237	if (dbuf) {
4238		task->task_cur_nbufs++;
4239		itask->itask_allocated_buf_map |= (1 << ndx);
4240		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4241		dbuf->db_handle = ndx;
4242		return (dbuf);
4243	}
4244
4245	return (NULL);
4246}
4247
4248stmf_status_t
4249stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4250{
4251	stmf_i_scsi_task_t *itask =
4252	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4253	stmf_local_port_t *lport = task->task_lport;
4254	uint8_t ndx;
4255	stmf_status_t ret;
4256
4257	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4258	ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4259	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4260
4261	if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4262		return (STMF_FAILURE);
4263	if (lport->lport_ds->ds_setup_dbuf == NULL)
4264		return (STMF_FAILURE);
4265
4266	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4267	if (ndx == 0xff)
4268		return (STMF_FAILURE);
4269	ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4270	if (ret == STMF_FAILURE)
4271		return (STMF_FAILURE);
4272	itask->itask_dbufs[ndx] = dbuf;
4273	task->task_cur_nbufs++;
4274	itask->itask_allocated_buf_map |= (1 << ndx);
4275	dbuf->db_handle = ndx;
4276
4277	return (STMF_SUCCESS);
4278}
4279
4280void
4281stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4282{
4283	stmf_i_scsi_task_t *itask =
4284	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4285	stmf_local_port_t *lport = task->task_lport;
4286
4287	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4288	ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
4289	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4290
4291	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4292	task->task_cur_nbufs--;
4293	lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
4294}
4295
4296void
4297stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4298{
4299	stmf_i_scsi_task_t *itask =
4300	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4301	stmf_local_port_t *lport = task->task_lport;
4302
4303	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4304	task->task_cur_nbufs--;
4305	lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
4306}
4307
4308stmf_data_buf_t *
4309stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4310{
4311	stmf_i_scsi_task_t *itask;
4312
4313	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4314	if (h > 3)
4315		return (NULL);
4316	return (itask->itask_dbufs[h]);
4317}
4318
/*
 * Allocate (or reuse from the LU's free-task cache) a scsi_task_t for a
 * new command arriving on session ss for the given LUN bytes.  Returns
 * NULL when the LU is resetting or offlining (unless routed via dlun0),
 * or when allocation fails; the caller lets the protocol stack report
 * the failure.  The returned task has ITASK_IN_TRANSITION set and holds
 * an ITL reference when one is registered for the nexus.
 */
/* ARGSUSED */
struct scsi_task *
stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
    uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
{
	stmf_lu_t *lu;
	stmf_i_scsi_session_t *iss;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint8_t	*l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* 14-bit LUN number decoded from the first two LUN bytes. */
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		/* Unmapped LUN: route the command to the dlun0 pseudo LU. */
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}

	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}

	/*
	 * if the LUN is being offlined or is offline then only command
	 * that are to query the LUN are allowed.  These are handled in
	 * stmf via the dlun0 vector.  It is possible that a race condition
	 * will cause other commands to arrive while the lun is in the
	 * process of being offlined.  Check for those and just let the
	 * protocol stack handle the error.
	 */
	if ((ilu->ilu_state == STMF_STATE_OFFLINING) ||
	    (ilu->ilu_state == STMF_STATE_OFFLINE)) {
		if (lu != dlun0) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
	}

	/*
	 * Try to reuse a cached task whose CDB buffer is large enough;
	 * otherwise fall through to allocating a brand new one.
	 */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			/* Track the low-water mark for the cache trimmer. */
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		/*
		 * Save the task_cdb pointer and zero per cmd fields.
		 * We know the task_cdb_length is large enough by task
		 * selection process above.
		 */
		uint8_t *save_cdb;
		uintptr_t t_start, t_end;

		task = itask->itask_task;
		save_cdb = task->task_cdb;	/* save */
		t_start = (uintptr_t)&task->task_flags;
		t_end = (uintptr_t)&task->task_extended_cmd;
		bzero((void *)t_start, (size_t)(t_end - t_start));
		task->task_cdb = save_cdb;	/* restore */
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		/* Round the CDB pointer up to an 8-byte boundary. */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
		mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&itask->itask_mutex, NULL, MUTEX_DRIVER, NULL);
	}

	/*
	 * Since a LUN can be mapped as different LUN ids to different initiator
	 * groups, we need to set LUN id for a new task and reset LUN id for
	 * a reused task.
	 */
	l = task->task_lun_no;
	l[0] = lun[0];
	l[1] = lun[1];
	l[2] = lun[2];
	l[3] = lun[3];
	l[4] = lun[4];
	l[5] = lun[5];
	l[6] = lun[6];
	l[7] = lun[7];

	/* Initialize per-command state and accounting under itask_mutex. */
	mutex_enter(&itask->itask_mutex);
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;
	itask->itask_waitq_time = 0;
	itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
	itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
	itask->itask_read_xfer = itask->itask_write_xfer = 0;
	itask->itask_audit_index = 0;
	bzero(&itask->itask_audit_records[0],
	    sizeof (stmf_task_audit_rec_t) * ITASK_TASK_AUDIT_DEPTH);
	mutex_exit(&itask->itask_mutex);

	if (new_task) {
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		/*
		 * Link the new task onto the LU's task list, re-checking
		 * for a reset that may have started since the check above.
		 */
		mutex_enter(&ilu->ilu_task_lock);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_inc_32(itask->itask_ilu_task_cntr);
	itask->itask_start_time = ddi_get_lbolt();

	/* Take an ITL reference when a handle exists for this nexus. */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_inc_32(&itask->itask_itl_datap->itl_counter);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}
4503
/*
 * Return a task to its LU's free-task cache.  Called with the session's
 * iss_lockp held (reader or writer) and itask_mutex held; itask_mutex is
 * released here, so the caller must not touch the task afterwards.
 * Signals ilu_offline_pending_cv once every task of the LU is free.
 * `iss' is only used by the ASSERT (hence ARGSUSED for non-DEBUG).
 */
/* ARGSUSED */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
	ASSERT((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0);
	ASSERT((itask->itask_flags & ITASK_IN_TRANSITION) == 0);
	ASSERT((itask->itask_flags & ITASK_KNOWN_TO_LU) == 0);
	ASSERT(mutex_owned(&itask->itask_mutex));

	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_ncmds = 0;
	itask->itask_proxy_msg_id = 0;
	atomic_dec_32(itask->itask_ilu_task_cntr);
	itask->itask_worker_next = NULL;
	mutex_exit(&itask->itask_mutex);

	/* Push onto the LU's free list (itask_mutex before ilu_task_lock). */
	mutex_enter(&ilu->ilu_task_lock);
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
		cv_signal(&ilu->ilu_offline_pending_cv);
	mutex_exit(&ilu->ilu_task_lock);
}
4534
/*
 * Trim the LU's free-task cache: release half of the minimum number of
 * free tasks observed since the last trim (ilu_ntasks_min_free).  Each
 * task is popped off the free list, handed back to the LU provider via
 * lu_task_free() with no locks held, then unlinked from the LU's task
 * list and freed.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t	num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t	*lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Drop the lock around the provider callback. */
		lu->lu_task_free(itask->itask_task);
		mutex_enter(&ilu->ilu_task_lock);
		/* Unlink from the doubly-linked LU task list. */
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}
4576
4577/*
4578 * Called with stmf_lock held
4579 */
4580void
4581stmf_check_freetask()
4582{
4583	stmf_i_lu_t *ilu;
4584	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);
4585
4586	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
4587	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
4588		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
4589		if (!ilu->ilu_ntasks_min_free) {
4590			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4591			continue;
4592		}
4593		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4594		mutex_exit(&stmf_state.stmf_lock);
4595		stmf_task_lu_check_freelist(ilu);
4596		/*
4597		 * we do not care about the accuracy of
4598		 * ilu_ntasks_min_free, so we don't lock here
4599		 */
4600		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4601		mutex_enter(&stmf_state.stmf_lock);
4602		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4603		cv_broadcast(&stmf_state.stmf_cv);
4604		if (ddi_get_lbolt() >= endtime)
4605			break;
4606	}
4607}
4608
4609/*
4610 * Since this method is looking to find tasks that are stuck, lost, or senile
 * it should be more willing to give up scanning during this time period. This
4612 * is why mutex_tryenter is now used instead of the standard mutex_enter.
 * There has been at least one case where the following occurred.
4614 *
4615 * 1) The iscsit_deferred() method is trying to register a session and
4616 *    needs the global lock which is held.
4617 * 2) Another thread which holds the global lock is trying to deregister a
4618 *    session and needs the session lock.
4619 * 3) A third thread is allocating a stmf task that has grabbed the session
4620 *    lock and is trying to grab the lun task lock.
4621 * 4) There's a timeout thread that has the lun task lock and is trying to grab
4622 *    a specific task lock.
4623 * 5) The thread that has the task lock is waiting for the ref count to go to
4624 *    zero.
4625 * 6) There's a task that would drop the count to zero, but it's in the task
 *    queue waiting to run and is stuck because #1 is currently blocked.
4627 *
4628 * This method is number 4 in the above chain of events. Had this code
4629 * originally used mutex_tryenter the chain would have been broken and the
4630 * system wouldn't have hung. So, now this method uses mutex_tryenter and
4631 * you know why it does so.
4632 */
/* ---- Only one thread calls stmf_do_ilu_timeouts so no lock required ---- */
typedef struct stmf_bailout_cnt {
	int	no_ilu_lock;	/* failed tryenter of ilu_task_lock */
	int	no_task_lock;	/* failed tryenter of an itask_mutex */
	int	tasks_checked;	/* tasks actually examined for timeout */
} stmf_bailout_cnt_t;

/* Diagnostic counters updated by stmf_do_ilu_timeouts(). */
stmf_bailout_cnt_t stmf_bailout;
4641
/*
 * Scan one LU's task list and abort any task that has exceeded its
 * timeout (task_timeout seconds, or stmf_default_task_timeout when the
 * task specifies none).  Uses mutex_tryenter throughout -- see the block
 * comment above -- and gives up rather than block, counting each bailout
 * in stmf_bailout.
 */
static void
stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
{
	clock_t l = ddi_get_lbolt();
	clock_t ps = drv_usectohz(1000000);	/* ticks per second */
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	uint32_t to;

	if (mutex_tryenter(&ilu->ilu_task_lock) == 0) {
		stmf_bailout.no_ilu_lock++;
		return;
	}

	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (mutex_tryenter(&itask->itask_mutex) == 0) {
			stmf_bailout.no_task_lock++;
			continue;
		}
		stmf_bailout.tasks_checked++;
		/* Skip tasks that are free or already being aborted. */
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			mutex_exit(&itask->itask_mutex);
			continue;
		}
		task = itask->itask_task;
		if (task->task_timeout == 0)
			to = stmf_default_task_timeout;
		else
			to = task->task_timeout;

		/* Not yet expired (deadline = start + timeout in ticks). */
		if ((itask->itask_start_time + (to * ps)) > l) {
			mutex_exit(&itask->itask_mutex);
			continue;
		}
		/* Drop the task lock before queueing the abort. */
		mutex_exit(&itask->itask_mutex);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_TIMEOUT, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}
4684
4685/*
4686 * Called with stmf_lock held
4687 */
4688void
4689stmf_check_ilu_timing()
4690{
4691	stmf_i_lu_t *ilu;
4692	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);
4693
4694	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
4695	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
4696		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
4697		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
4698			if (ilu->ilu_task_cntr2 == 0) {
4699				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
4700				continue;
4701			}
4702		} else {
4703			if (ilu->ilu_task_cntr1 == 0) {
4704				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
4705				continue;
4706			}
4707		}
4708		/*
4709		 * If we are here then it means that there is some slowdown
4710		 * in tasks on this lu. We need to check.
4711		 */
4712		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4713		mutex_exit(&stmf_state.stmf_lock);
4714		stmf_do_ilu_timeouts(ilu);
4715		mutex_enter(&stmf_state.stmf_lock);
4716		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4717		cv_broadcast(&stmf_state.stmf_cv);
4718		if (ddi_get_lbolt() >= endtime)
4719			break;
4720	}
4721}
4722
4723/*
4724 * Kills all tasks on a lu except tm_task
4725 */
4726void
4727stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4728{
4729	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4730	stmf_i_scsi_task_t *itask;
4731
4732	mutex_enter(&ilu->ilu_task_lock);
4733	for (itask = ilu->ilu_tasks; itask != NULL;
4734	    itask = itask->itask_lu_next) {
4735		mutex_enter(&itask->itask_mutex);
4736		if (itask->itask_flags & ITASK_IN_FREE_LIST) {
4737			mutex_exit(&itask->itask_mutex);
4738			continue;
4739		}
4740		mutex_exit(&itask->itask_mutex);
4741		if (itask->itask_task == tm_task)
4742			continue;
4743		stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4744	}
4745	mutex_exit(&ilu->ilu_task_lock);
4746}
4747
4748void
4749stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
4750{
4751	int i;
4752	uint8_t map;
4753
4754	if ((map = itask->itask_allocated_buf_map) == 0)
4755		return;
4756	for (i = 0; i < 4; i++) {
4757		if (map & 1) {
4758			stmf_data_buf_t *dbuf;
4759
4760			dbuf = itask->itask_dbufs[i];
4761			if (dbuf->db_xfer_start_timestamp) {
4762				stmf_lport_xfer_done(itask, dbuf);
4763			}
4764			if (dbuf->db_flags & DB_LU_DATA_BUF) {
4765				/*
4766				 * LU needs to clean up buffer.
4767				 * LU is required to free the buffer
4768				 * in the xfer_done handler.
4769				 */
4770				scsi_task_t *task = itask->itask_task;
4771				stmf_lu_t *lu = task->task_lu;
4772
4773				lu->lu_dbuf_free(task, dbuf);
4774				ASSERT(((itask->itask_allocated_buf_map>>i)
4775				    & 1) == 0); /* must be gone */
4776			} else {
4777				ASSERT(dbuf->db_lu_private == NULL);
4778				dbuf->db_lu_private = NULL;
4779				lport->lport_ds->ds_free_data_buf(
4780				    lport->lport_ds, dbuf);
4781			}
4782		}
4783		map >>= 1;
4784	}
4785	itask->itask_allocated_buf_map = 0;
4786}
4787
/*
 * Final teardown of a completed/aborted task: notify the LU (lu_task_done),
 * release remaining data buffers, drop the ITL reference, let the port
 * provider free its side, then return the task to the LU's free-task
 * cache.  Entered with itask_mutex held; the mutex is dropped and
 * reacquired below to establish iss_lockp -> itask_mutex ordering, and
 * stmf_task_lu_free() releases itask_mutex before we return.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lu_t *lu = task->task_lu;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);
	ASSERT(mutex_owned(&itask->itask_mutex));
	if ((lu != NULL) && (lu->lu_task_done != NULL))
		lu->lu_task_done(task);
	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop the task's ITL reference; last one out releases the handle. */
	if (itask->itask_itl_datap) {
		if (atomic_dec_32_nv(&itask->itask_itl_datap->itl_counter) ==
		    0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	/*
	 * To prevent a deadlock condition must release the itask_mutex,
	 * grab a reader lock on iss_lockp and then reacquire the itask_mutex.
	 */
	mutex_exit(&itask->itask_mutex);
	rw_enter(iss->iss_lockp, RW_READER);
	mutex_enter(&itask->itask_mutex);

	lport->lport_task_free(task);
	if (itask->itask_worker) {
		atomic_dec_32(&stmf_cur_ntasks);
		atomic_dec_32(&itask->itask_worker->worker_ref_count);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4835
/*
 * Hand a freshly allocated/reused task to a worker thread for execution.
 * Picks a worker round-robin (skipping ahead once if the chosen worker
 * already has queued work), marks the task known to the target port,
 * records the optional initial data buffer and queues an
 * ITASK_CMD_NEW_TASK command on the worker.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t new;
	uint32_t ct;
	stmf_worker_t *w;
	uint8_t tm;

	/* At most 4 data buffers per task (size of itask_dbufs). */
	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_inc_32_nv(&stmf_cur_ntasks);

	/* Select the next worker using round robin */
	mutex_enter(&stmf_worker_sel_mx);
	stmf_worker_sel_counter++;
	if (stmf_worker_sel_counter >= stmf_nworkers)
		stmf_worker_sel_counter = 0;
	nv = stmf_worker_sel_counter;

	/* if the selected worker is not idle then bump to the next worker */
	if (stmf_workers[nv].worker_queue_depth > 0) {
		stmf_worker_sel_counter++;
		if (stmf_worker_sel_counter >= stmf_nworkers)
			stmf_worker_sel_counter = 0;
		nv = stmf_worker_sel_counter;
	}
	mutex_exit(&stmf_worker_sel_mx);

	w = &stmf_workers[nv];

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);

	itask->itask_worker = w;

	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	new = itask->itask_flags;
	new |= ITASK_KNOWN_TO_TGT_PORT;
	/*
	 * Target resets and REPORT LUNS are handled by stmf's default
	 * handling (dlun0) rather than the mapped LU.
	 */
	if (task->task_mgmt_function) {
		tm = task->task_mgmt_function;
		if ((tm == TM_TARGET_RESET) ||
		    (tm == TM_TARGET_COLD_RESET) ||
		    (tm == TM_TARGET_WARM_RESET)) {
			new |= ITASK_DEFAULT_HANDLING;
		}
	} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
		new |= ITASK_DEFAULT_HANDLING;
	}
	new &= ~ITASK_IN_TRANSITION;
	itask->itask_flags = new;

	stmf_itl_task_start(itask);

	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;

	if ((task->task_flags & TF_INITIAL_BURST) &&
	    !(curthread->t_flag & T_INTR_THREAD)) {
		stmf_update_kstat_lu_io(task, dbuf);
		stmf_update_kstat_lport_io(task, dbuf);
		stmf_update_kstat_rport_io(task, dbuf);
	}

	stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
	/* An unsolicited-data buffer, if any, always takes slot 0. */
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	STMF_ENQUEUE_ITASK(w, itask);

	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}
4937
4938static void
4939stmf_task_audit(stmf_i_scsi_task_t *itask,
4940    task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4941{
4942	stmf_task_audit_rec_t *ar;
4943
4944	mutex_enter(&itask->itask_audit_mutex);
4945	ar = &itask->itask_audit_records[itask->itask_audit_index++];
4946	itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4947	ar->ta_event = te;
4948	ar->ta_cmd_or_iof = cmd_or_iof;
4949	ar->ta_itask_flags = itask->itask_flags;
4950	ar->ta_dbuf = dbuf;
4951	gethrestime(&ar->ta_timestamp);
4952	mutex_exit(&itask->itask_audit_mutex);
4953}
4954
4955
4956/*
4957 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU may already have been
 * reset (i.e. it was cleared before ITASK_BEING_ABORTED was set). But if it
 * was not, it cannot be reset until the LU explicitly calls
 * stmf_task_lu_aborted(). Of course the LU will make this call only if we
 * call the LU's abort entry point, and we will only call that entry point
 * if ITASK_KNOWN_TO_LU was set.
4963 *
4964 * Same logic applies for the port.
4965 *
4966 * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU
4967 * and KNOWN_TO_TGT_PORT are reset.
4968 *
4969 * +++++++++++++++++++++++++++++++++++++++++++++++
4970 */
4971
/*
 * Start a data phase for the task by handing dbuf to the port provider.
 * STMF_IOF_LU_DONE additionally clears ITASK_KNOWN_TO_LU.  Tasks already
 * being aborted are rejected with STMF_ABORTED.  With STMF_IOF_STATS_ONLY
 * only the transfer statistics are updated; no transfer is initiated.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret = STMF_SUCCESS;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);

	mutex_enter(&itask->itask_mutex);
	if (ioflags & STMF_IOF_LU_DONE) {
		if (itask->itask_flags & ITASK_BEING_ABORTED) {
			mutex_exit(&itask->itask_mutex);
			return (STMF_ABORTED);
		}
		itask->itask_flags &= ~ITASK_KNOWN_TO_LU;
	}
	if ((itask->itask_flags & ITASK_BEING_ABORTED) != 0) {
		mutex_exit(&itask->itask_mutex);
		return (STMF_ABORTED);
	}
	mutex_exit(&itask->itask_mutex);

#ifdef	DEBUG
	/*
	 * DEBUG-only fault injection: while stmf_drop_buf_counter is
	 * positive, pretend success without starting the transfer
	 * (presumably to exercise lost-buffer recovery paths -- note it
	 * only short-circuits when the decrement lands on 1).
	 */
	if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
		if (atomic_dec_32_nv((uint32_t *)&stmf_drop_buf_counter) == 1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);
	stmf_update_kstat_rport_io(task, dbuf);
	stmf_lport_xfer_start(itask, dbuf);
	if (ioflags & STMF_IOF_STATS_ONLY) {
		stmf_lport_xfer_done(itask, dbuf);
		return (STMF_SUCCESS);
	}

	dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);

	/*
	 * Port provider may have already called the buffer callback in
	 * which case dbuf->db_xfer_start_timestamp will be 0.
	 */
	if (ret != STMF_SUCCESS) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
		if (dbuf->db_xfer_start_timestamp != 0)
			stmf_lport_xfer_done(itask, dbuf);
	}

	return (ret);
}
5027
/*
 * Port provider completion callback for a transfer started via
 * stmf_xfer_data().  Closes out transfer accounting, then either queues
 * an ITASK_CMD_DATA_XFER_DONE command back to the task's worker (when the
 * task is still known to the LU) or frees the task when nothing
 * references it any more.  Completions for buffers not marked
 * DB_LPORT_XFER_ACTIVE indicate a port-provider bug and panic.
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new;
	uint8_t update_queue_flags, free_it, queue_it;

	stmf_lport_xfer_done(itask, dbuf);

	stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);

	/* Guard against unexpected completions from the lport */
	if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
	} else {
		/*
		 * This should never happen.
		 */
		ilport = task->task_lport->lport_stmf_private;
		ilport->ilport_unexpected_comp++;
		cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
		    (void *)task, (void *)dbuf);
		/* NOTREACHED: cmn_err(CE_PANIC) does not return. */
		return;
	}

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);
	/* `new' mirrors itask_flags; updates are applied in one store below */
	new = itask->itask_flags;
	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}
	free_it = 0;
	if (iof & STMF_IOF_LPORT_DONE) {
		new &= ~ITASK_KNOWN_TO_TGT_PORT;
		task->task_completion_status = dbuf->db_xfer_status;
		free_it = 1;
	}
	/*
	 * If the task is known to LU then queue it. But if
	 * it is already queued (multiple completions) then
	 * just update the buffer information by grabbing the
	 * worker lock. If the task is not known to LU,
	 * completed/aborted, then see if we need to
	 * free this task.
	 */
	if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		free_it = 0;
		update_queue_flags = 1;
		if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
			queue_it = 0;
		} else {
			queue_it = 1;
		}
	} else {
		update_queue_flags = 0;
		queue_it = 0;
	}
	itask->itask_flags = new;

	if (update_queue_flags) {
		/* Command encoding: dbuf handle in the top 3 bits. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);

		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			STMF_ENQUEUE_ITASK(w, itask);
		}
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}

	mutex_exit(&w->worker_lock);
	if (free_it) {
		/* stmf_task_free() consumes itask_mutex when it runs. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
			return;
		}
	}
	mutex_exit(&itask->itask_mutex);
}
5118
/*
 * Called by the LU when command processing is complete and final SCSI
 * status should be sent to the initiator.  Computes the over/underrun
 * residual information and hands the task to the lport's
 * lport_send_status entry point.
 *
 * Returns STMF_ABORTED if the task is being aborted, STMF_SUCCESS if
 * the target port no longer knows about the task (nothing to send),
 * otherwise the return value of lport_send_status().
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);

	mutex_enter(&itask->itask_mutex);
	if (ioflags & STMF_IOF_LU_DONE) {
		if (itask->itask_flags & ITASK_BEING_ABORTED) {
			mutex_exit(&itask->itask_mutex);
			return (STMF_ABORTED);
		}
		/* The LU is completely finished with this task. */
		itask->itask_flags &= ~ITASK_KNOWN_TO_LU;
	}

	/* If the target port has let go of the task there is nothing to do. */
	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		mutex_exit(&itask->itask_mutex);
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&itask->itask_mutex);
		return (STMF_ABORTED);
	}
	mutex_exit(&itask->itask_mutex);

	/*
	 * Work out residual status: overrun when the command asked for more
	 * than the initiator expected to transfer, underrun when fewer bytes
	 * than expected actually moved.  No residual reporting at all when
	 * the initiator supplied no expected transfer length.
	 */
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}
5168
/*
 * Completion callback from the lport after it has attempted to send
 * SCSI status for a task.  's' is the lport's completion status and
 * 'iof' may carry STMF_IOF_LPORT_DONE when the lport is completely
 * finished with the task.  Either requeues the task to its worker so
 * the LU can be told (ITASK_CMD_STATUS_DONE), or frees the task if
 * nobody knows about it any more.
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new;
	uint8_t free_it, queue_it;

	stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);
	new = itask->itask_flags;
	/* Abort processing owns the task once ITASK_BEING_ABORTED is set. */
	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}
	free_it = 0;
	if (iof & STMF_IOF_LPORT_DONE) {
		new &= ~ITASK_KNOWN_TO_TGT_PORT;
		free_it = 1;
	}
	/*
	 * If the task is known to LU then queue it. But if
	 * it is already queued (multiple completions) then
	 * just update the buffer information by grabbing the
	 * worker lock. If the task is not known to LU,
	 * completed/aborted, then see if we need to
	 * free this task.
	 */
	if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		free_it = 0;
		queue_it = 1;
		/*
		 * Unlike data-xfer completions, a second status completion
		 * while the task is already queued is impossible by design,
		 * so it is treated as fatal corruption.
		 */
		if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
			cmn_err(CE_PANIC, "status completion received"
			    " when task is already in worker queue "
			    " task = %p", (void *)task);
		}
	} else {
		queue_it = 0;
	}
	itask->itask_flags = new;
	task->task_completion_status = s;

	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;

		STMF_ENQUEUE_ITASK(w, itask);
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}

	mutex_exit(&w->worker_lock);

	if (free_it) {
		/* Free only if no component still references the task. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
			return;
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
	mutex_exit(&itask->itask_mutex);
}
5242
/*
 * Called by the LU when it is completely done with a task.  Clears
 * ITASK_KNOWN_TO_LU and frees the task, which must by then be unknown
 * to every other component; any other state is treated as fatal.
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;

	mutex_enter(&itask->itask_mutex);
	mutex_enter(&w->worker_lock);
	/* Aborting tasks are torn down by the abort path, not here. */
	if (itask->itask_flags & ITASK_BEING_ABORTED) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return;
	}
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		cmn_err(CE_PANIC, "task_lu_done received"
		    " when task is in worker queue "
		    " task = %p", (void *)task);
	}
	itask->itask_flags &= ~ITASK_KNOWN_TO_LU;

	mutex_exit(&w->worker_lock);
	/*
	 * Both arms below terminate the function (stmf_task_free() or
	 * panic), so the trailing mutex_exit() is never reached; it is
	 * kept for symmetry with the other completion paths.
	 */
	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
		return;
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
	mutex_exit(&itask->itask_mutex);
}
5276
/*
 * Mark a task as being aborted (with completion status 's') and, when
 * possible, queue it to its worker so stmf_do_task_abort() will run.
 * No-op if an abort is already in progress or if neither the LU nor
 * the target port knows about the task any more.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;

	stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);

	mutex_enter(&itask->itask_mutex);
	if ((itask->itask_flags & ITASK_BEING_ABORTED) ||
	    ((itask->itask_flags & (ITASK_KNOWN_TO_TGT_PORT |
	    ITASK_KNOWN_TO_LU)) == 0)) {
		mutex_exit(&itask->itask_mutex);
		return;
	}
	itask->itask_flags |= ITASK_BEING_ABORTED;
	task->task_completion_status = s;

	/*
	 * With no worker assigned, or while the task is in transition,
	 * setting ITASK_BEING_ABORTED above is all we can do here;
	 * presumably whoever completes the transition notices the flag.
	 */
	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		mutex_exit(&itask->itask_mutex);
		return;
	}

	/* Queue it and get out */
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		mutex_exit(&itask->itask_mutex);
		return;
	}
	mutex_enter(&w->worker_lock);
	STMF_ENQUEUE_ITASK(w, itask);
	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);
}
5312
5313void
5314stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
5315{
5316	stmf_i_scsi_task_t *itask = NULL;
5317	uint32_t f, rf;
5318
5319	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
5320	    stmf_status_t, s);
5321
5322	switch (abort_cmd) {
5323	case STMF_QUEUE_ABORT_LU:
5324		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
5325		return;
5326	case STMF_QUEUE_TASK_ABORT:
5327		stmf_queue_task_for_abort(task, s);
5328		return;
5329	case STMF_REQUEUE_TASK_ABORT_LPORT:
5330		rf = ITASK_TGT_PORT_ABORT_CALLED;
5331		f = ITASK_KNOWN_TO_TGT_PORT;
5332		break;
5333	case STMF_REQUEUE_TASK_ABORT_LU:
5334		rf = ITASK_LU_ABORT_CALLED;
5335		f = ITASK_KNOWN_TO_LU;
5336		break;
5337	default:
5338		return;
5339	}
5340
5341	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
5342	mutex_enter(&itask->itask_mutex);
5343	f |= ITASK_BEING_ABORTED | rf;
5344
5345	if ((itask->itask_flags & f) != f) {
5346		mutex_exit(&itask->itask_mutex);
5347		return;
5348	}
5349	itask->itask_flags &= ~rf;
5350	mutex_exit(&itask->itask_mutex);
5351
5352}
5353
5354/*
5355 * NOTE: stmf_abort_task_offline will release and then reacquire the
5356 * itask_mutex. This is required to prevent a lock order violation.
5357 */
/*
 * Handle the LU's response to an abort request (called with
 * itask_mutex held).  On success or "task not found" with the LU fully
 * done, simply clears ITASK_KNOWN_TO_LU; any failure, or an LU that is
 * not yet finished, escalates via stmf_abort_task_offline() with a
 * descriptive message.
 */
void
stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char			 info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	unsigned long long	st;

	stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);
	ASSERT(mutex_owned(&itask->itask_mutex));
	st = s;	/* gcc fix */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but LU is not finished, task ="
		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LU abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
		return;
	}

	/* NOTE: drops and reacquires itask_mutex (see comment above). */
	stmf_abort_task_offline(task, 1, info);
}
5385
5386/*
5387 * NOTE: stmf_abort_task_offline will release and then reacquire the
5388 * itask_mutex. This is required to prevent a lock order violation.
5389 */
/*
 * Handle the target port's response to an abort request (called with
 * itask_mutex held) -- the lport-side mirror of stmf_task_lu_aborted().
 * On success with the lport fully done, clears ITASK_KNOWN_TO_TGT_PORT;
 * otherwise escalates via stmf_abort_task_offline().
 */
void
stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char			info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	unsigned long long	st;

	ASSERT(mutex_owned(&itask->itask_mutex));
	stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);
	st = s;
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, tgt port failed to abort ret=%llx", (void *)task,
		    st);
	} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but tgt port is not finished, "
		    "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LPORT abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_TGT_PORT);
		return;
	}

	/* NOTE: drops and reacquires itask_mutex (see comment above). */
	stmf_abort_task_offline(task, 0, info);
}
5418
5419void
5420stmf_task_lport_aborted_unlocked(scsi_task_t *task, stmf_status_t s,
5421    uint32_t iof)
5422{
5423	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
5424
5425	mutex_enter(&itask->itask_mutex);
5426	stmf_task_lport_aborted(task, s, iof);
5427	mutex_exit(&itask->itask_mutex);
5428}
5429
/*
 * Schedule an ITASK_CMD_POLL_LU command for this task, to be run by
 * its worker after 'timeout' expires (timeout is in milliseconds,
 * per the usec conversion below; ITASK_DEFAULT_POLL_TIMEOUT means
 * "next tick").  Duplicate poll requests are collapsed.
 *
 * Returns STMF_BUSY if the per-task command stack is full, otherwise
 * STMF_SUCCESS.
 */
stmf_status_t
stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	mutex_enter(&itask->itask_mutex);
	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return (STMF_BUSY);
	}
	/* A poll is already pending; nothing more to do. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
			mutex_exit(&w->worker_lock);
			mutex_exit(&itask->itask_mutex);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;	/* never round a nonzero timeout down to 0 */
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		STMF_ENQUEUE_ITASK(w, itask);
	}
	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);
	return (STMF_SUCCESS);
}
5469
/*
 * Schedule an ITASK_CMD_POLL_LPORT command for this task -- the
 * target-port mirror of stmf_task_poll_lu().  'timeout' is in
 * milliseconds (per the usec conversion below); duplicate requests
 * are collapsed.
 *
 * Returns STMF_BUSY if the per-task command stack is full, otherwise
 * STMF_SUCCESS.
 */
stmf_status_t
stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	mutex_enter(&itask->itask_mutex);
	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		mutex_exit(&itask->itask_mutex);
		return (STMF_BUSY);
	}
	/* A poll is already pending; nothing more to do. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
			mutex_exit(&w->worker_lock);
			mutex_exit(&itask->itask_mutex);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;	/* never round a nonzero timeout down to 0 */
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		STMF_ENQUEUE_ITASK(w, itask);
	}
	mutex_exit(&w->worker_lock);
	mutex_exit(&itask->itask_mutex);
	return (STMF_SUCCESS);
}
5509
/*
 * Drive the abort of a task: call the LU's abort entry point and/or
 * the lport's abort entry point, each at most once (tracked via
 * ITASK_LU_ABORT_CALLED / ITASK_TGT_PORT_ABORT_CALLED), handle their
 * results, and escalate via stmf_abort_task_offline() on hard failure
 * or when an already-issued abort times out.  STMF_BUSY from an abort
 * handler clears the "called" marker so the abort will be retried.
 */
void
stmf_do_task_abort(scsi_task_t *task)
{
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	stmf_lu_t		*lu;
	stmf_local_port_t	*lport;
	unsigned long long	 ret;
	uint32_t		 new = 0;
	uint8_t			 call_lu_abort, call_port_abort;
	char			 info[STMF_CHANGE_INFO_LEN];

	lu = task->task_lu;
	lport = task->task_lport;
	mutex_enter(&itask->itask_mutex);
	new = itask->itask_flags;
	/* LU abort is due iff the LU knows the task and wasn't called yet. */
	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_LU_ABORT_CALLED)) == ITASK_KNOWN_TO_LU) {
		new |= ITASK_LU_ABORT_CALLED;
		call_lu_abort = 1;
	} else {
		call_lu_abort = 0;
	}
	itask->itask_flags = new;

	if (call_lu_abort) {
		/* dlun0 handles tasks under default (STMF-internal) mode. */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
			ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		} else {
			ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		}
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
		} else if (ret == STMF_BUSY) {
			/* LU couldn't take the abort now; allow a retry. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_LU_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by LU %p, ret %llx", (void *)lu, ret);
			stmf_abort_task_offline(task, 1, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		/* Abort already issued earlier; enforce the LU timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lu->lu_abort_timeout?
		    lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lu abort timed out");
			stmf_abort_task_offline(itask->itask_task, 1, info);
		}
	}

	/*
	 * NOTE: After the call to either stmf_abort_task_offline() or
	 * stmf_task_lu_abort() the itask_mutex was dropped and reacquired
	 * to avoid a deadlock situation with stmf_state.stmf_lock.
	 */

	/* Re-read flags: they may have changed while the mutex was dropped. */
	new = itask->itask_flags;
	if ((itask->itask_flags & (ITASK_KNOWN_TO_TGT_PORT |
	    ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
		new |= ITASK_TGT_PORT_ABORT_CALLED;
		call_port_abort = 1;
	} else {
		call_port_abort = 0;
	}
	itask->itask_flags = new;

	if (call_port_abort) {
		ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
		} else if (ret == STMF_BUSY) {
			/* lport couldn't take the abort now; allow a retry. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_TGT_PORT_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by tgt port %p ret %llx",
			    (void *)lport, ret);
			stmf_abort_task_offline(task, 0, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
		/* Abort already issued earlier; enforce the lport timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lport->lport_abort_timeout?
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lport abort timed out");
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
	mutex_exit(&itask->itask_mutex);
}
5601
5602stmf_status_t
5603stmf_ctl(int cmd, void *obj, void *arg)
5604{
5605	stmf_status_t			ret;
5606	stmf_i_lu_t			*ilu;
5607	stmf_i_local_port_t		*ilport;
5608	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
5609
5610	mutex_enter(&stmf_state.stmf_lock);
5611	ret = STMF_INVALID_ARG;
5612	if (cmd & STMF_CMD_LU_OP) {
5613		ilu = stmf_lookup_lu((stmf_lu_t *)