xref: /illumos-gate/usr/src/uts/sun/io/scsi/adapters/sf.c (revision 48bbca81)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25  * Copyright (c) 2016 by Delphix. All rights reserved.
26  */
27 
28 /*
29  * sf - Solaris Fibre Channel driver
30  *
31  * This module implements some of the Fibre Channel FC-4 layer, converting
32  * from FC frames to SCSI and back.  (Note: no sequence management is done
33  * here, though.)
34  */
35 
36 #if defined(lint) && !defined(DEBUG)
37 #define	DEBUG	1
38 #endif
39 
40 /*
41  * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
42  * Need to use the ugly RAID LUN mappings in FCP Annex D
43  * to prevent SCSA from barfing.  This *REALLY* needs to
44  * be addressed by the standards committee.
45  */
46 #define	RAID_LUNS	1
47 
#ifdef DEBUG
static int sfdebug = 0;
#include <sys/debug.h>

/*
 * Emit a debug message through sf_log() when the global debug level is
 * at least "level".  "args" must be a parenthesized sf_log() argument
 * list, e.g. SF_DEBUG(2, (sf, CE_CONT, "...\n"));
 *
 * Wrapped in do/while (0) so the macro always expands to exactly one
 * statement: the old bare-if form could silently capture a following
 * "else" at a call site (dangling-else hazard).
 */
#define	SF_DEBUG(level, args) \
	do { \
		if (sfdebug >= (level)) \
			sf_log args; \
	} while (0)
#else
#define	SF_DEBUG(level, args)	do { } while (0)
#endif
57 
58 static int sf_bus_config_debug = 0;
59 
60 #include <sys/scsi/scsi.h>
61 #include <sys/fc4/fcal.h>
62 #include <sys/fc4/fcp.h>
63 #include <sys/fc4/fcal_linkapp.h>
64 #include <sys/socal_cq_defs.h>
65 #include <sys/fc4/fcal_transport.h>
66 #include <sys/fc4/fcio.h>
67 #include <sys/scsi/adapters/sfvar.h>
68 #include <sys/scsi/impl/scsi_reset_notify.h>
69 #include <sys/stat.h>
70 #include <sys/varargs.h>
71 #include <sys/var.h>
72 #include <sys/thread.h>
73 #include <sys/proc.h>
74 #include <sys/kstat.h>
75 #include <sys/devctl.h>
76 #include <sys/scsi/targets/ses.h>
77 #include <sys/callb.h>
78 #include <sys/sysmacros.h>
79 
80 static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
81 static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
82 static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
83 static void sf_softstate_unlink(struct sf *);
84 static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
85     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
86 static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
87     ddi_bus_config_op_t op, void *arg);
88 static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
89     scsi_hba_tran_t *, struct scsi_device *);
90 static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
91     scsi_hba_tran_t *, struct scsi_device *);
92 static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
93     int, int, int);
94 static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
95 static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
96     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
97 static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
98 static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
99 static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
100 static int sf_scsi_reset_notify(struct scsi_address *, int,
101     void (*)(caddr_t), caddr_t);
102 static int sf_scsi_get_name(struct scsi_device *, char *, int);
103 static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
104 static int sf_add_cr_pool(struct sf *);
105 static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
106 static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
107 static void sf_crpool_free(struct sf *);
108 static int sf_kmem_cache_constructor(void *, void *, int);
109 static void sf_kmem_cache_destructor(void *, void *);
110 static void sf_statec_callback(void *, int);
111 static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
112 static int sf_els_transport(struct sf *, struct sf_els_hdr *);
113 static void sf_els_callback(struct fcal_packet *);
114 static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
115 static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
116 static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
117     struct sf_target *);
118 static void sf_reportlun_callback(struct fcal_packet *);
119 static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
120     struct sf_target *);
121 static void sf_inq_callback(struct fcal_packet *);
122 static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
123     int, caddr_t *, caddr_t *);
124 static void sf_els_free(struct fcal_packet *);
125 static struct sf_target *sf_create_target(struct sf *,
126     struct sf_els_hdr *, int, int64_t);
127 #ifdef RAID_LUNS
128 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
129 #else
130 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
131 #endif
132 static void sf_finish_init(struct sf *, int);
133 static void sf_offline_target(struct sf *, struct sf_target *);
134 static void sf_create_devinfo(struct sf *, struct sf_target *, int);
135 static int sf_create_props(dev_info_t *, struct sf_target *, int);
136 static int sf_commoncap(struct scsi_address *, char *, int, int, int);
137 static int sf_getcap(struct scsi_address *, char *, int);
138 static int sf_setcap(struct scsi_address *, char *, int, int);
139 static int sf_abort(struct scsi_address *, struct scsi_pkt *);
140 static int sf_reset(struct scsi_address *, int);
141 static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
142 static int sf_start(struct scsi_address *, struct scsi_pkt *);
143 static int sf_start_internal(struct sf *, struct sf_pkt *);
144 static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
145 static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
146 static int sf_dopoll(struct sf *, struct sf_pkt *);
147 static void sf_cmd_callback(struct fcal_packet *);
148 static void sf_throttle(struct sf *);
149 static void sf_watch(void *);
150 static void sf_throttle_start(struct sf *);
151 static void sf_check_targets(struct sf *);
152 static void sf_check_reset_delay(void *);
153 static int sf_target_timeout(struct sf *, struct sf_pkt *);
154 static void sf_force_lip(struct sf *);
155 static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
156 static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
157 /*PRINTFLIKE3*/
158 static void sf_log(struct sf *, int, const char *, ...);
159 static int sf_kstat_update(kstat_t *, int);
160 static int sf_open(dev_t *, int, int, cred_t *);
161 static int sf_close(dev_t, int, int, cred_t *);
162 static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
163 static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
164 static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
165     ddi_eventcookie_t *);
166 static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
167     ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
168 static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
169 static int sf_bus_post_event(dev_info_t *, dev_info_t *,
170     ddi_eventcookie_t, void *);
171 
172 static void sf_hp_daemon(void *);
173 
/*
 * this is required to be able to supply a control node
 * where ioctls can be executed
 *
 * Only open, close and ioctl are supported; every other character
 * device entry point is explicitly stubbed out (nodev/nochpoll).
 */
struct cb_ops sf_cb_ops = {
	sf_open,			/* open */
	sf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	sf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG	/* driver flags */

};
196 
/*
 * autoconfiguration routines.
 *
 * Bus operations are NULL here; the SCSA transport entry points for
 * this nexus are registered on the scsi_hba_tran_t in sf_attach().
 */
static struct dev_ops sf_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	sf_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	sf_attach,		/* attach */
	sf_detach,		/* detach */
	nodev,			/* reset */
	&sf_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power management */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
214 
#define	SF_NAME	"FC-AL FCP Nexus Driver"	/* Name of the module. */
static	char	sf_version[] = "1.72 08/19/2008"; /* version of the module */

/* module-loading linkage: this module is a device driver */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	SF_NAME,
	&sf_ops,	/* driver ops */
};

/* linkage list handed to mod_install()/mod_remove()/mod_info() */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
227 
/* XXXXXX The following is here to handle broken targets -- remove it later */
static int sf_reportlun_forever = 0;
/* XXXXXX */
static int sf_lip_on_plogo = 0;		/* tunable; presumably forces a LIP on PLOGO -- confirm at use site */
static int sf_els_retries = SF_ELS_RETRIES;	/* ELS retry limit (tunable) */
static struct sf *sf_head = NULL;	/* list of all sf instances; guarded by sf_global_mutex */
static int sf_target_scan_cnt = 4;	/* tunable; presumably a sf_watch() scan interval -- confirm */
static int sf_pkt_scan_cnt = 5;		/* tunable; presumably a sf_watch() scan interval -- confirm */
static int sf_pool_scan_cnt = 1800;	/* tunable; presumably cr-pool scan interval -- confirm */
static void *sf_state = NULL;		/* ddi_soft_state anchor for per-instance struct sf */
static int sf_watchdog_init = 0;	/* count of instances sharing the watchdog timeout */
static int sf_watchdog_time = 0;	/* time base used when arming sf_timer deadlines */
static int sf_watchdog_timeout = 1;	/* watchdog period, in seconds */
static int sf_watchdog_tick;		/* watchdog period in ticks; computed in sf_attach() */
static int sf_watch_running = 0;	/* flag coordinating with sf_watch(); see sf_watch_cv */
static timeout_id_t sf_watchdog_id;	/* timeout(9F) id of the pending sf_watch() callout */
static timeout_id_t sf_reset_timeout_id;
static int sf_max_targets = SF_MAX_TARGETS;	/* per-loop target array size */
static kmutex_t sf_global_mutex;	/* protects sf_head list and watchdog globals */
static int sf_core = 0;
int *sf_token = NULL; /* Must not be static or lint complains. */
static kcondvar_t sf_watch_cv;		/* signalled around sf_watch_running transitions */
extern pri_t minclsyspri;		/* priority for the hotplug daemon thread */
static ddi_eventcookie_t	sf_insert_eid;	/* cached insert-event cookie */
static ddi_eventcookie_t	sf_remove_eid;	/* cached remove-event cookie */
253 
/*
 * NDI events advertised by this nexus: device insertion is delivered
 * at kernel priority, removal at interrupt priority.
 */
static ndi_event_definition_t	sf_event_defs[] = {
{ SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
{ SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
};

/* number of entries in sf_event_defs */
#define	SF_N_NDI_EVENTS	\
	(sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))

#ifdef DEBUG
static int sf_lip_flag = 1;		/* bool: to allow LIPs */
static int sf_reset_flag = 1;		/* bool: to allow reset after LIP */
static int sf_abort_flag = 0;		/* bool: to do just one abort */
#endif
267 
268 extern int64_t ddi_get_lbolt64(void);
269 
/*
 * for converting between target number (switch) and hard address/AL_PA
 */
/* indexed by target number (0..125): yields that target's AL_PA */
static uchar_t sf_switch_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};

/*
 * inverse of the table above: indexed by AL_PA, yields the target
 * number.  A 0x00 entry is either target 0 (AL_PA 0xef) or an AL_PA
 * value that is not a valid loop address.
 */
static uchar_t sf_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
315 
/*
 * these macros call the proper transport-layer function given
 * a particular transport
 *
 * "a" is the fcal_transport handle holding the ops vector; the
 * remaining arguments are forwarded unchanged to the underlying
 * fcal_ops entry point.  All macro parameters are parenthesized so
 * call sites may safely pass compound expressions.
 */
#define	soc_transport(a, b, c, d) \
	(*(a)->fcal_ops->fcal_transport)((b), (c), (d))
#define	soc_transport_poll(a, b, c, d)\
	(*(a)->fcal_ops->fcal_transport_poll)((b), (c), (d))
#define	soc_get_lilp_map(a, b, c, d, e)\
	(*(a)->fcal_ops->fcal_lilp_map)((b), (c), (d), (e))
#define	soc_force_lip(a, b, c, d, e)\
	(*(a)->fcal_ops->fcal_force_lip)((b), (c), (d), (e))
#define	soc_abort(a, b, c, d, e)\
	(*(a)->fcal_ops->fcal_abort_cmd)((b), (c), (d), (e))
#define	soc_force_reset(a, b, c, d)\
	(*(a)->fcal_ops->fcal_force_reset)((b), (c), (d))
#define	soc_add_ulp(a, b, c, d, e, f, g, h)\
	(*(a)->fcal_ops->fcal_add_ulp)((b), (c), (d), (e), (f), (g), (h))
#define	soc_remove_ulp(a, b, c, d, e)\
	(*(a)->fcal_ops->fcal_remove_ulp)((b), (c), (d), (e))
#define	soc_take_core(a, b) (*(a)->fcal_ops->fcal_take_core)((b))
336 
337 
338 /* power management property defines (should be in a common include file?) */
339 #define	PM_HARDWARE_STATE_PROP		"pm-hardware-state"
340 #define	PM_NEEDS_SUSPEND_RESUME		"needs-suspend-resume"
341 
342 
343 /* node properties */
344 #define	NODE_WWN_PROP			"node-wwn"
345 #define	PORT_WWN_PROP			"port-wwn"
346 #define	LIP_CNT_PROP			"lip-count"
347 #define	TARGET_PROP			"target"
348 #define	LUN_PROP			"lun"
349 
350 
351 /*
352  * initialize this driver and install this module
353  */
354 int
_init(void)355 _init(void)
356 {
357 	int	i;
358 
359 	i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
360 	    SF_INIT_ITEMS);
361 	if (i != 0)
362 		return (i);
363 
364 	if ((i = scsi_hba_init(&modlinkage)) != 0) {
365 		ddi_soft_state_fini(&sf_state);
366 		return (i);
367 	}
368 
369 	mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
370 	sf_watch_running = 0;
371 	cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);
372 
373 	if ((i = mod_install(&modlinkage)) != 0) {
374 		mutex_destroy(&sf_global_mutex);
375 		cv_destroy(&sf_watch_cv);
376 		scsi_hba_fini(&modlinkage);
377 		ddi_soft_state_fini(&sf_state);
378 		return (i);
379 	}
380 
381 	return (i);
382 }
383 
384 
385 /*
386  * remove this driver module from the system
387  */
388 int
_fini(void)389 _fini(void)
390 {
391 	int	i;
392 
393 	if ((i = mod_remove(&modlinkage)) == 0) {
394 		scsi_hba_fini(&modlinkage);
395 		mutex_destroy(&sf_global_mutex);
396 		cv_destroy(&sf_watch_cv);
397 		ddi_soft_state_fini(&sf_state);
398 	}
399 	return (i);
400 }
401 
402 
/*
 * return information about this module via the module framework
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
408 
409 /*
410  * Given the device number return the devinfo pointer or instance
411  */
412 /*ARGSUSED*/
413 static int
sf_info(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)414 sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
415 {
416 	int		instance = SF_MINOR2INST(getminor((dev_t)arg));
417 	struct sf	*sf;
418 
419 	switch (infocmd) {
420 	case DDI_INFO_DEVT2DEVINFO:
421 		sf = ddi_get_soft_state(sf_state, instance);
422 		if (sf != NULL)
423 			*result = sf->sf_dip;
424 		else {
425 			*result = NULL;
426 			return (DDI_FAILURE);
427 		}
428 		break;
429 
430 	case DDI_INFO_DEVT2INSTANCE:
431 		*result = (void *)(uintptr_t)instance;
432 		break;
433 	default:
434 		return (DDI_FAILURE);
435 	}
436 	return (DDI_SUCCESS);
437 }
438 
/*
 * either attach or resume this driver
 *
 * dip: devinfo node for this instance
 * cmd: DDI_ATTACH for first-time attach, DDI_RESUME after DDI_SUSPEND
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  On a DDI_ATTACH failure every
 * partially-acquired resource is released via the "fail:" label, which
 * relies on the zeroed soft state to tell what has been set up so far.
 */
static int
sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	int mutex_initted = FALSE;	/* TRUE once all mutexes/CVs exist */
	uint_t ccount;
	size_t i, real_size;
	struct fcal_transport *handle;
	char buf[64];
	struct sf *sf, *tsf;
	scsi_hba_tran_t *tran = NULL;
	int	handle_bound = FALSE;	/* TRUE once lilp DMA handle bound */
	kthread_t *tp;


	switch ((int)cmd) {

	case DDI_RESUME:

		/*
		 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
		 * so time to undo that and get going again by forcing a
		 * lip
		 */

		instance = ddi_get_instance(dip);

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_attach: DDI_RESUME for sf%d\n", instance));
		if (sf == NULL) {
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * clear suspended flag so that normal operations can resume
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_state &= ~SF_STATE_SUSPENDED;
		mutex_exit(&sf->sf_mutex);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 *
		 * NOTE(review): sf_watchdog_tick was computed during the
		 * original DDI_ATTACH, so it is valid to reuse here.
		 */
		mutex_enter(&sf_global_mutex);
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_id = timeout(sf_watch,
			    (caddr_t)0, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_ATTACH:

		/*
		 * this instance attaching for the first time
		 */

		instance = ddi_get_instance(dip);

		if (ddi_soft_state_zalloc(sf_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
			    instance);
			return (DDI_FAILURE);
		}

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(4, (sf, CE_CONT,
		    "sf_attach: DDI_ATTACH for sf%d\n", instance));
		if (sf == NULL) {
			/* this shouldn't happen since we just allocated it */
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * from this point on, if there's an error, we must de-allocate
		 * soft state before returning DDI_FAILURE
		 */

		if ((handle = ddi_get_parent_data(dip)) == NULL) {
			cmn_err(CE_WARN,
			    "sf%d: failed to obtain transport handle",
			    instance);
			goto fail;
		}

		/* fill in our soft state structure */
		sf->sf_dip = dip;
		sf->sf_state = SF_STATE_INIT;
		sf->sf_throttle = handle->fcal_cmdmax;
		sf->sf_sochandle = handle;
		sf->sf_socp = handle->fcal_handle;
		sf->sf_check_n_close = 0;

		/* create a command/response buffer pool for this instance */
		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate command/response pool",
			    instance);
			goto fail;
		}

		/* create a packet kmem cache for this instance */
		(void) sprintf(buf, "sf%d_cache", instance);
		sf->sf_pkt_cache = kmem_cache_create(buf,
		    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
		    scsi_pkt_size(), 8,
		    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
		    NULL, NULL, NULL, 0);
		if (sf->sf_pkt_cache == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
			    instance);
			goto fail;
		}

		/* set up a handle and allocate memory for DMA */
		if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
		    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
		    sf_lilp_dmahandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate dma handle for lilp map",
			    instance);
			goto fail;
		}
		/* one extra byte so the buffer can be rounded up, below */
		i = sizeof (struct fcal_lilp_map) + 1;
		if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
		    i, sf->sf_sochandle->
		    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    (caddr_t *)&sf->sf_lilp_map, &real_size,
		    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
			    instance);
			goto fail;
		}
		if (real_size < i) {
			/* no error message ??? */
			goto fail;		/* trouble allocating memory */
		}

		/*
		 * set up the address for the DMA transfers (getting a cookie)
		 */
		if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
		    (caddr_t)sf->sf_lilp_map, real_size,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
			cmn_err(CE_WARN,
			    "sf%d: failed to bind dma handle for lilp map",
			    instance);
			goto fail;
		}
		handle_bound = TRUE;
		/* ensure only one cookie was allocated */
		if (ccount != 1) {
			goto fail;
		}

		/*
		 * ensure LILP map and DMA cookie addresses are even??
		 *
		 * NOTE(review): rounds both pointer and cookie up by at most
		 * one byte (covered by the extra byte allocated above);
		 * presumably an even-address requirement of the hardware.
		 */
		sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
		    sf_lilp_map + 1) & ~1);
		sf->sf_lilp_dmacookie.dmac_address = (sf->
		    sf_lilp_dmacookie.dmac_address + 1) & ~1;

		/* set up all of our mutexes and condition variables */
		mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
		cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);

		mutex_initted = TRUE;

		/* create our devctl minor node */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    SF_INST2DEVCTL_MINOR(instance),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for devctl", instance);
			goto fail;
		}

		/* create fc minor node */
		if (ddi_create_minor_node(dip, "fc", S_IFCHR,
		    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
		    0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for fc", instance);
			goto fail;
		}
		/* allocate a SCSI transport structure */
		tran = scsi_hba_tran_alloc(dip, 0);
		if (tran == NULL) {
			/* remove all minor nodes created */
			ddi_remove_minor_node(dip, NULL);
			cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
			    instance);
			goto fail;
		}

		/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
		scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */

		/* save ptr to new transport structure and fill it in */
		sf->sf_tran = tran;

		tran->tran_hba_private		= sf;
		tran->tran_tgt_private		= NULL;
		tran->tran_tgt_init		= sf_scsi_tgt_init;
		tran->tran_tgt_probe		= NULL;
		tran->tran_tgt_free		= sf_scsi_tgt_free;

		tran->tran_start		= sf_start;
		tran->tran_abort		= sf_abort;
		tran->tran_reset		= sf_reset;
		tran->tran_getcap		= sf_getcap;
		tran->tran_setcap		= sf_setcap;
		tran->tran_init_pkt		= sf_scsi_init_pkt;
		tran->tran_destroy_pkt		= sf_scsi_destroy_pkt;
		tran->tran_dmafree		= sf_scsi_dmafree;
		tran->tran_sync_pkt		= sf_scsi_sync_pkt;
		tran->tran_reset_notify		= sf_scsi_reset_notify;

		/*
		 * register event notification routines with scsa
		 */
		tran->tran_get_eventcookie	= sf_bus_get_eventcookie;
		tran->tran_add_eventcall	= sf_bus_add_eventcall;
		tran->tran_remove_eventcall	= sf_bus_remove_eventcall;
		tran->tran_post_event		= sf_bus_post_event;

		/*
		 * register bus configure/unconfigure
		 */
		tran->tran_bus_config		= sf_scsi_bus_config;
		tran->tran_bus_unconfig		= sf_scsi_bus_unconfig;

		/*
		 * allocate an ndi event handle
		 */
		sf->sf_event_defs = (ndi_event_definition_t *)
		    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);

		bcopy(sf_event_defs, sf->sf_event_defs,
		    sizeof (sf_event_defs));

		(void) ndi_event_alloc_hdl(dip, NULL,
		    &sf->sf_event_hdl, NDI_SLEEP);

		sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
		sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
		sf->sf_events.ndi_event_defs = sf->sf_event_defs;

		if (ndi_event_bind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
			goto fail;
		}

		tran->tran_get_name		= sf_scsi_get_name;
		tran->tran_get_bus_addr		= sf_scsi_get_bus_addr;

		/* setup and attach SCSI hba transport */
		if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
		    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
			    instance);
			goto fail;
		}

		/* set up kstats; failure here is non-fatal */
		if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
		    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
		    KSTAT_FLAG_VIRTUAL)) == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to create kstat",
			    instance);
		} else {
			sf->sf_stats.version = 2;
			(void) sprintf(sf->sf_stats.drvr_name,
			"%s: %s", SF_NAME, sf_version);
			sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
			sf->sf_ksp->ks_private = sf;
			sf->sf_ksp->ks_update = sf_kstat_update;
			kstat_install(sf->sf_ksp);
		}

		/* create the hotplug thread */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		tp = thread_create(NULL, 0,
		    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
		sf->sf_hp_tid = tp->t_did;
		mutex_exit(&sf->sf_hp_daemon_mutex);

		/* add this soft state instance to the head of the list */
		mutex_enter(&sf_global_mutex);
		sf->sf_next = sf_head;
		tsf = sf_head;
		sf_head = sf;

		/*
		 * find entry in list that has the same FC-AL handle (if any)
		 */
		while (tsf != NULL) {
			if (tsf->sf_socp == sf->sf_socp) {
				break;		/* found matching entry */
			}
			tsf = tsf->sf_next;
		}

		if (tsf != NULL) {
			/* if we found a matching entry keep track of it */
			sf->sf_sibling = tsf;
		}

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 */
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_tick = sf_watchdog_timeout *
			    drv_usectohz(1000000);
			sf_watchdog_id = timeout(sf_watch,
			    NULL, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		if (tsf != NULL) {
			/*
			 * set up matching entry to be our sibling
			 */
			mutex_enter(&tsf->sf_mutex);
			tsf->sf_sibling = sf;
			mutex_exit(&tsf->sf_mutex);
		}

		/*
		 * create this property so that PM code knows we want
		 * to be suspended at PM time
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);

		/* log the fact that we have a new device */
		ddi_report_dev(dip);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
		sf->sf_reset_time = ddi_get_lbolt64();
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

fail:
	/*
	 * only reachable from DDI_ATTACH, so "instance" and "sf" are
	 * valid here; zeroed soft-state fields tell us what to undo
	 */
	cmn_err(CE_WARN, "sf%d: failed to attach", instance);

	/*
	 * Unbind and free event set
	 */
	if (sf->sf_event_hdl) {
		(void) ndi_event_unbind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(sf->sf_event_hdl);
	}

	if (sf->sf_event_defs) {
		kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
	}

	if (sf->sf_tran != NULL) {
		scsi_hba_tran_free(sf->sf_tran);
	}
	while (sf->sf_cr_pool != NULL) {
		sf_crpool_free(sf);
	}
	if (sf->sf_lilp_dmahandle != NULL) {
		if (handle_bound) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
		}
		ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
	}
	if (sf->sf_pkt_cache != NULL) {
		kmem_cache_destroy(sf->sf_pkt_cache);
	}
	if (sf->sf_lilp_map != NULL) {
		ddi_dma_mem_free(&sf->sf_lilp_acchandle);
	}
	if (sf->sf_ksp != NULL) {
		kstat_delete(sf->sf_ksp);
	}
	if (mutex_initted) {
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);
	}
	mutex_enter(&sf_global_mutex);

	/*
	 * kill off the watchdog if we are the last instance
	 */
	if (!--sf_watchdog_init) {
		timeout_id_t tid = sf_watchdog_id;
		mutex_exit(&sf_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&sf_global_mutex);
	}

	ddi_soft_state_free(sf_state, instance);

	if (tran != NULL) {
		/* remove all minor nodes */
		ddi_remove_minor_node(dip, NULL);
	}

	return (DDI_FAILURE);
}
908 
909 
/*
 * sf_detach() - DDI detach(9E) entry point
 *
 * DDI_SUSPEND: unregisters transport callbacks, forces a LIP, marks the
 * instance and every target (and each target's LUN chain) offline/busy so
 * outstanding commands drain, and stops the shared watchdog timeout if
 * this is the last active instance.
 *
 * DDI_DETACH: does the SUSPEND-style drain and then tears down everything
 * sf_attach() built: hotplug daemon thread, NDI event set, HBA transport,
 * lilp-map DMA resources, packet kmem cache, cmd/rsp pools, locks/CVs,
 * minor nodes, properties, kstats, and finally the soft state.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  Runs single-threaded with respect
 * to this instance (see "NO OTHER THREADS ARE RUNNING" below).
 */
/* ARGSUSED */
static int
sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct sf		*sf;
	int			instance;
	int			i;
	struct sf_target	*target;
	timeout_id_t		tid;



	/* NO OTHER THREADS ARE RUNNING */

	instance = ddi_get_instance(dip);

	if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
		cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
		return (DDI_FAILURE);
	}

	switch (cmd) {

	case DDI_SUSPEND:
		/*
		 * suspend our instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
		/*
		 * There is a race condition in socal where while doing
		 * callbacks if a ULP removes it self from the callback list
		 * the for loop in socal may panic as cblist is junk and
		 * while trying to get cblist->next the system will panic.
		 */

		/* call transport to remove our unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * begin process of clearing outstanding commands
		 * by issuing a lip
		 */
		sf_force_lip(sf);

		/*
		 * toggle the device OFFLINE in order to cause
		 * outstanding commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				/* do this for all LUNs as well */
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);
		mutex_enter(&sf_global_mutex);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		if (!--sf_watchdog_init) {
			/*
			 * copy the id and drop the global lock before
			 * untimeout(), which may block waiting for the
			 * timeout handler to finish
			 */
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * detach this instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_DETACH for sf%d\n", instance));

		/* remove this "sf" from the list of sf softstates */
		sf_softstate_unlink(sf);

		/*
		 * prior to taking any DDI_DETACH actions, toggle the
		 * device OFFLINE in order to cause outstanding
		 * commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				/* do this for all LUNs as well */
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);

		/* call transport to remove and unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		mutex_enter(&sf_global_mutex);
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		/* signal sf_hp_daemon() to exit and wait for exit */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		ASSERT(sf->sf_hp_tid);
		sf->sf_hp_exit = 1;		/* flag exit */
		cv_signal(&sf->sf_hp_daemon_cv);
		mutex_exit(&sf->sf_hp_daemon_mutex);
		thread_join(sf->sf_hp_tid);	/* wait for hotplug to exit */

		/*
		 * Unbind and free event set
		 */
		if (sf->sf_event_hdl) {
			(void) ndi_event_unbind_set(sf->sf_event_hdl,
			    &sf->sf_events, NDI_SLEEP);
			(void) ndi_event_free_hdl(sf->sf_event_hdl);
		}

		if (sf->sf_event_defs) {
			kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
		}

		/* detach this instance of the HBA driver */
		(void) scsi_hba_detach(dip);
		scsi_hba_tran_free(sf->sf_tran);

		/* deallocate/unbind DMA handle for lilp map */
		if (sf->sf_lilp_map != NULL) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
			if (sf->sf_lilp_dmahandle != NULL) {
				ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
			}
			ddi_dma_mem_free(&sf->sf_lilp_acchandle);
		}

		/*
		 * the kmem cache must be destroyed before free'ing
		 * up the crpools
		 *
		 * our finagle of "ntot" and "nfree"
		 * causes an ASSERT failure in "sf_cr_free()"
		 * if the kmem cache is free'd after invoking
		 * "sf_crpool_free()".
		 */
		kmem_cache_destroy(sf->sf_pkt_cache);

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: sf_crpool_free() for instance 0x%x\n",
		    instance));
		while (sf->sf_cr_pool != NULL) {
			/*
			 * set ntot to nfree for this particular entry
			 *
			 * this causes sf_crpool_free() to update
			 * the cr_pool list when deallocating this entry
			 */
			sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
			sf_crpool_free(sf);
		}

		/*
		 * now that the cr_pool's are gone it's safe
		 * to destroy all softstate mutex's and cv's
		 */
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);

		/* remove all minor nodes from the device tree */
		ddi_remove_minor_node(dip, NULL);

		/* remove properties created during attach() */
		ddi_prop_remove_all(dip);

		/* remove kstat's if present */
		if (sf->sf_ksp != NULL) {
			kstat_delete(sf->sf_ksp);
		}

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
		    instance));
		ddi_soft_state_free(sf_state, instance);
		return (DDI_SUCCESS);

	default:
		SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
		    instance, (int)cmd));
		return (DDI_FAILURE);
	}
}
1163 
1164 
1165 /*
1166  * sf_softstate_unlink() - remove an sf instance from the list of softstates
1167  */
1168 static void
sf_softstate_unlink(struct sf * sf)1169 sf_softstate_unlink(struct sf *sf)
1170 {
1171 	struct sf	*sf_ptr;
1172 	struct sf	*sf_found_sibling;
1173 	struct sf	*sf_reposition = NULL;
1174 
1175 
1176 	mutex_enter(&sf_global_mutex);
1177 	while (sf_watch_running) {
1178 		/* Busy working the list -- wait */
1179 		cv_wait(&sf_watch_cv, &sf_global_mutex);
1180 	}
1181 	if ((sf_found_sibling = sf->sf_sibling) != NULL) {
1182 		/*
1183 		 * we have a sibling so NULL out its reference to us
1184 		 */
1185 		mutex_enter(&sf_found_sibling->sf_mutex);
1186 		sf_found_sibling->sf_sibling = NULL;
1187 		mutex_exit(&sf_found_sibling->sf_mutex);
1188 	}
1189 
1190 	/* remove our instance from the global list */
1191 	if (sf == sf_head) {
1192 		/* we were at at head of the list */
1193 		sf_head = sf->sf_next;
1194 	} else {
1195 		/* find us in the list */
1196 		for (sf_ptr = sf_head;
1197 		    sf_ptr != NULL;
1198 		    sf_ptr = sf_ptr->sf_next) {
1199 			if (sf_ptr == sf) {
1200 				break;
1201 			}
1202 			/* remember this place */
1203 			sf_reposition = sf_ptr;
1204 		}
1205 		ASSERT(sf_ptr == sf);
1206 		ASSERT(sf_reposition != NULL);
1207 
1208 		sf_reposition->sf_next = sf_ptr->sf_next;
1209 	}
1210 	mutex_exit(&sf_global_mutex);
1211 }
1212 
1213 
1214 static int
sf_scsi_bus_config(dev_info_t * parent,uint_t flag,ddi_bus_config_op_t op,void * arg,dev_info_t ** childp)1215 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1216     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1217 {
1218 	int64_t		reset_delay;
1219 	struct sf	*sf;
1220 
1221 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1222 	ASSERT(sf);
1223 
1224 	reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1225 	    (ddi_get_lbolt64() - sf->sf_reset_time);
1226 	if (reset_delay < 0)
1227 		reset_delay = 0;
1228 
1229 	if (sf_bus_config_debug)
1230 		flag |= NDI_DEVI_DEBUG;
1231 
1232 	return (ndi_busop_bus_config(parent, flag, op,
1233 	    arg, childp, (clock_t)reset_delay));
1234 }
1235 
1236 static int
sf_scsi_bus_unconfig(dev_info_t * parent,uint_t flag,ddi_bus_config_op_t op,void * arg)1237 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1238     ddi_bus_config_op_t op, void *arg)
1239 {
1240 	if (sf_bus_config_debug)
1241 		flag |= NDI_DEVI_DEBUG;
1242 
1243 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1244 }
1245 
1246 
1247 /*
1248  * called by transport to initialize a SCSI target
1249  */
1250 /* ARGSUSED */
1251 static int
sf_scsi_tgt_init(dev_info_t * hba_dip,dev_info_t * tgt_dip,scsi_hba_tran_t * hba_tran,struct scsi_device * sd)1252 sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1253     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1254 {
1255 #ifdef RAID_LUNS
1256 	int lun;
1257 #else
1258 	int64_t lun;
1259 #endif
1260 	struct sf_target *target;
1261 	struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
1262 	int i, t_len;
1263 	unsigned int lip_cnt;
1264 	unsigned char wwn[FC_WWN_SIZE];
1265 
1266 
1267 	/* get and validate our SCSI target ID */
1268 	i = sd->sd_address.a_target;
1269 	if (i >= sf_max_targets) {
1270 		return (DDI_NOT_WELL_FORMED);
1271 	}
1272 
1273 	/* get our port WWN property */
1274 	t_len = sizeof (wwn);
1275 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1276 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1277 	    (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
1278 		/* no port WWN property - ignore the OBP stub node */
1279 		return (DDI_NOT_WELL_FORMED);
1280 	}
1281 
1282 	/* get our LIP count property */
1283 	t_len = sizeof (lip_cnt);
1284 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1285 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
1286 	    (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
1287 		return (DDI_FAILURE);
1288 	}
1289 	/* and our LUN property */
1290 	t_len = sizeof (lun);
1291 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1292 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1293 	    (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
1294 		return (DDI_FAILURE);
1295 	}
1296 
1297 	/* find the target structure for this instance */
1298 	mutex_enter(&sf->sf_mutex);
1299 	if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
1300 		mutex_exit(&sf->sf_mutex);
1301 		return (DDI_FAILURE);
1302 	}
1303 
1304 	mutex_enter(&target->sft_mutex);
1305 	if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
1306 	    & SF_TARGET_INIT_DONE)) {
1307 		/*
1308 		 * set links between HBA transport and target structures
1309 		 * and set done flag
1310 		 */
1311 		hba_tran->tran_tgt_private = target;
1312 		target->sft_tran = hba_tran;
1313 		target->sft_state |= SF_TARGET_INIT_DONE;
1314 	} else {
1315 		/* already initialized ?? */
1316 		mutex_exit(&target->sft_mutex);
1317 		mutex_exit(&sf->sf_mutex);
1318 		return (DDI_FAILURE);
1319 	}
1320 	mutex_exit(&target->sft_mutex);
1321 	mutex_exit(&sf->sf_mutex);
1322 
1323 	return (DDI_SUCCESS);
1324 }
1325 
1326 
1327 /*
1328  * called by transport to free a target
1329  */
1330 /* ARGSUSED */
1331 static void
sf_scsi_tgt_free(dev_info_t * hba_dip,dev_info_t * tgt_dip,scsi_hba_tran_t * hba_tran,struct scsi_device * sd)1332 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1333     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1334 {
1335 	struct sf_target *target = hba_tran->tran_tgt_private;
1336 
1337 	if (target != NULL) {
1338 		mutex_enter(&target->sft_mutex);
1339 		target->sft_tran = NULL;
1340 		target->sft_state &= ~SF_TARGET_INIT_DONE;
1341 		mutex_exit(&target->sft_mutex);
1342 	}
1343 }
1344 
1345 
1346 /*
1347  * allocator for non-std size cdb/pkt_private/status -- return TRUE iff
1348  * success, else return FALSE
1349  */
1350 /*ARGSUSED*/
1351 static int
sf_pkt_alloc_extern(struct sf * sf,struct sf_pkt * cmd,int tgtlen,int statuslen,int kf)1352 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1353     int tgtlen, int statuslen, int kf)
1354 {
1355 	caddr_t scbp, tgt;
1356 	int failure = FALSE;
1357 	struct scsi_pkt *pkt = CMD2PKT(cmd);
1358 
1359 
1360 	tgt = scbp = NULL;
1361 
1362 	if (tgtlen > PKT_PRIV_LEN) {
1363 		if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1364 			failure = TRUE;
1365 		} else {
1366 			cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1367 			pkt->pkt_private = tgt;
1368 		}
1369 	}
1370 	if (statuslen > EXTCMDS_STATUS_SIZE) {
1371 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1372 			failure = TRUE;
1373 		} else {
1374 			cmd->cmd_flags |= CFLAG_SCBEXTERN;
1375 			pkt->pkt_scbp = (opaque_t)scbp;
1376 		}
1377 	}
1378 	if (failure) {
1379 		sf_pkt_destroy_extern(sf, cmd);
1380 	}
1381 	return (failure);
1382 }
1383 
1384 
1385 /*
1386  * deallocator for non-std size cdb/pkt_private/status
1387  */
1388 static void
sf_pkt_destroy_extern(struct sf * sf,struct sf_pkt * cmd)1389 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1390 {
1391 	struct scsi_pkt *pkt = CMD2PKT(cmd);
1392 
1393 	if (cmd->cmd_flags & CFLAG_FREE) {
1394 		cmn_err(CE_PANIC,
1395 		    "sf_scsi_impl_pktfree: freeing free packet");
1396 		_NOTE(NOT_REACHED)
1397 		/* NOTREACHED */
1398 	}
1399 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1400 		kmem_free((caddr_t)pkt->pkt_scbp,
1401 		    (size_t)cmd->cmd_scblen);
1402 	}
1403 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1404 		kmem_free((caddr_t)pkt->pkt_private,
1405 		    (size_t)cmd->cmd_privlen);
1406 	}
1407 
1408 	cmd->cmd_flags = CFLAG_FREE;
1409 	kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1410 }
1411 
1412 
1413 /*
1414  * create or initialize a SCSI packet -- called internally and
1415  * by the transport
1416  */
1417 static struct scsi_pkt *
sf_scsi_init_pkt(struct scsi_address * ap,struct scsi_pkt * pkt,struct buf * bp,int cmdlen,int statuslen,int tgtlen,int flags,int (* callback)(),caddr_t arg)1418 sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1419     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1420     int flags, int (*callback)(), caddr_t arg)
1421 {
1422 	int kf;
1423 	int failure = FALSE;
1424 	struct sf_pkt *cmd;
1425 	struct sf *sf = ADDR2SF(ap);
1426 	struct sf_target *target = ADDR2TARGET(ap);
1427 	struct sf_pkt	*new_cmd = NULL;
1428 	struct fcal_packet	*fpkt;
1429 	fc_frame_header_t	*hp;
1430 	struct fcp_cmd *fcmd;
1431 
1432 
1433 	/*
1434 	 * If we've already allocated a pkt once,
1435 	 * this request is for dma allocation only.
1436 	 */
1437 	if (pkt == NULL) {
1438 
1439 		/*
1440 		 * First step of sf_scsi_init_pkt:  pkt allocation
1441 		 */
1442 		if (cmdlen > FCP_CDB_SIZE) {
1443 			return (NULL);
1444 		}
1445 
1446 		kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
1447 
1448 		if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
1449 			/*
1450 			 * Selective zeroing of the pkt.
1451 			 */
1452 
1453 			cmd->cmd_flags = 0;
1454 			cmd->cmd_forw = 0;
1455 			cmd->cmd_back = 0;
1456 			cmd->cmd_next = 0;
1457 			cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
1458 			    sizeof (struct sf_pkt) + sizeof (struct
1459 			    fcal_packet));
1460 			cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
1461 			    sizeof (struct sf_pkt));
1462 			cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
1463 			cmd->cmd_state = SF_STATE_IDLE;
1464 			cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
1465 			cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
1466 			cmd->cmd_pkt->pkt_comp	= NULL;
1467 			cmd->cmd_pkt->pkt_flags	= 0;
1468 			cmd->cmd_pkt->pkt_time	= 0;
1469 			cmd->cmd_pkt->pkt_resid	= 0;
1470 			cmd->cmd_pkt->pkt_reason = 0;
1471 			cmd->cmd_cdblen = (uchar_t)cmdlen;
1472 			cmd->cmd_scblen		= statuslen;
1473 			cmd->cmd_privlen	= tgtlen;
1474 			cmd->cmd_pkt->pkt_address = *ap;
1475 
1476 			/* zero pkt_private */
1477 			(int *)(cmd->cmd_pkt->pkt_private =
1478 			    cmd->cmd_pkt_private);
1479 			bzero((caddr_t)cmd->cmd_pkt->pkt_private,
1480 			    PKT_PRIV_LEN);
1481 		} else {
1482 			failure = TRUE;
1483 		}
1484 
1485 		if (failure ||
1486 		    (tgtlen > PKT_PRIV_LEN) ||
1487 		    (statuslen > EXTCMDS_STATUS_SIZE)) {
1488 			if (!failure) {
1489 				/* need to allocate more space */
1490 				failure = sf_pkt_alloc_extern(sf, cmd,
1491 				    tgtlen, statuslen, kf);
1492 			}
1493 			if (failure) {
1494 				return (NULL);
1495 			}
1496 		}
1497 
1498 		fpkt = cmd->cmd_fp_pkt;
1499 		if (cmd->cmd_block == NULL) {
1500 
1501 			/* allocate cmd/response pool buffers */
1502 			if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
1503 				sf_pkt_destroy_extern(sf, cmd);
1504 				return (NULL);
1505 			}
1506 
1507 			/* fill in the FC-AL packet */
1508 			fpkt->fcal_pkt_cookie = sf->sf_socp;
1509 			fpkt->fcal_pkt_comp = sf_cmd_callback;
1510 			fpkt->fcal_pkt_flags = 0;
1511 			fpkt->fcal_magic = FCALP_MAGIC;
1512 			fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
1513 			    (ushort_t)(SOC_FC_HEADER |
1514 			    sf->sf_sochandle->fcal_portno);
1515 			fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
1516 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
1517 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
1518 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
1519 			fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
1520 			    (uint32_t)cmd->cmd_dmac;
1521 			fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
1522 			    sizeof (struct fcp_cmd);
1523 			fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
1524 			    (uint32_t)cmd->cmd_rsp_dmac;
1525 			fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
1526 			    FCP_MAX_RSP_IU_SIZE;
1527 
1528 			/* Fill in the Fabric Channel Header */
1529 			hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
1530 			hp->r_ctl = R_CTL_COMMAND;
1531 			hp->type = TYPE_SCSI_FCP;
1532 			hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
1533 			hp->reserved1 = 0;
1534 			hp->seq_id = 0;
1535 			hp->df_ctl  = 0;
1536 			hp->seq_cnt = 0;
1537 			hp->ox_id = 0xffff;
1538 			hp->rx_id = 0xffff;
1539 			hp->ro = 0;
1540 
1541 			/* Establish the LUN */
1542 			bcopy((caddr_t)&target->sft_lun.b,
1543 			    (caddr_t)&cmd->cmd_block->fcp_ent_addr,
1544 			    FCP_LUN_SIZE);
1545 			*((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
1546 		}
1547 		cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;
1548 
1549 		mutex_enter(&target->sft_pkt_mutex);
1550 
1551 		target->sft_pkt_tail->cmd_forw = cmd;
1552 		cmd->cmd_back = target->sft_pkt_tail;
1553 		cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
1554 		target->sft_pkt_tail = cmd;
1555 
1556 		mutex_exit(&target->sft_pkt_mutex);
1557 		new_cmd = cmd;		/* for later cleanup if needed */
1558 	} else {
1559 		/* pkt already exists -- just a request for DMA allocation */
1560 		cmd = PKT2CMD(pkt);
1561 		fpkt = cmd->cmd_fp_pkt;
1562 	}
1563 
1564 	/* zero cdb (bzero is too slow) */
1565 	bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);
1566 
1567 	/*
1568 	 * Second step of sf_scsi_init_pkt:  dma allocation
1569 	 * Set up dma info
1570 	 */
1571 	if ((bp != NULL) && (bp->b_bcount != 0)) {
1572 		int cmd_flags, dma_flags;
1573 		int rval = 0;
1574 		uint_t dmacookie_count;
1575 
1576 		/* there is a buffer and some data to transfer */
1577 
1578 		/* set up command and DMA flags */
1579 		cmd_flags = cmd->cmd_flags;
1580 		if (bp->b_flags & B_READ) {
1581 			/* a read */
1582 			cmd_flags &= ~CFLAG_DMASEND;
1583 			dma_flags = DDI_DMA_READ;
1584 		} else {
1585 			/* a write */
1586 			cmd_flags |= CFLAG_DMASEND;
1587 			dma_flags = DDI_DMA_WRITE;
1588 		}
1589 		if (flags & PKT_CONSISTENT) {
1590 			cmd_flags |= CFLAG_CMDIOPB;
1591 			dma_flags |= DDI_DMA_CONSISTENT;
1592 		}
1593 
1594 		/* ensure we have a DMA handle */
1595 		if (cmd->cmd_dmahandle == NULL) {
1596 			rval = ddi_dma_alloc_handle(sf->sf_dip,
1597 			    sf->sf_sochandle->fcal_dmaattr, callback, arg,
1598 			    &cmd->cmd_dmahandle);
1599 		}
1600 
1601 		if (rval == 0) {
1602 			/* bind our DMA handle to our buffer */
1603 			rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
1604 			    dma_flags, callback, arg, &cmd->cmd_dmacookie,
1605 			    &dmacookie_count);
1606 		}
1607 
1608 		if (rval != 0) {
1609 			/* DMA failure */
1610 			SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
1611 			switch (rval) {
1612 			case DDI_DMA_NORESOURCES:
1613 				bioerror(bp, 0);
1614 				break;
1615 			case DDI_DMA_BADATTR:
1616 			case DDI_DMA_NOMAPPING:
1617 				bioerror(bp, EFAULT);
1618 				break;
1619 			case DDI_DMA_TOOBIG:
1620 			default:
1621 				bioerror(bp, EINVAL);
1622 				break;
1623 			}
1624 			/* clear valid flag */
1625 			cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
1626 			if (new_cmd != NULL) {
1627 				/* destroy packet if we just created it */
1628 				sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
1629 			}
1630 			return (NULL);
1631 		}
1632 
1633 		ASSERT(dmacookie_count == 1);
1634 		/* set up amt to transfer and set valid flag */
1635 		cmd->cmd_dmacount = bp->b_bcount;
1636 		cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
1637 
1638 		ASSERT(cmd->cmd_dmahandle != NULL);
1639 	}
1640 
1641 	/* set up FC-AL packet */
1642 	fcmd = cmd->cmd_block;
1643 
1644 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1645 		if (cmd->cmd_flags & CFLAG_DMASEND) {
1646 			/* DMA write */
1647 			fcmd->fcp_cntl.cntl_read_data = 0;
1648 			fcmd->fcp_cntl.cntl_write_data = 1;
1649 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1650 			    CQ_TYPE_IO_WRITE;
1651 		} else {
1652 			/* DMA read */
1653 			fcmd->fcp_cntl.cntl_read_data = 1;
1654 			fcmd->fcp_cntl.cntl_write_data = 0;
1655 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1656 			    CQ_TYPE_IO_READ;
1657 		}
1658 		fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
1659 		    (uint32_t)cmd->cmd_dmacookie.dmac_address;
1660 		fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
1661 		    cmd->cmd_dmacookie.dmac_size;
1662 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
1663 		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1664 		    cmd->cmd_dmacookie.dmac_size;
1665 		fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
1666 	} else {
1667 		/* not a read or write */
1668 		fcmd->fcp_cntl.cntl_read_data = 0;
1669 		fcmd->fcp_cntl.cntl_write_data = 0;
1670 		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
1671 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
1672 		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1673 		    sizeof (struct fcp_cmd);
1674 		fcmd->fcp_data_len = 0;
1675 	}
1676 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
1677 
1678 	return (cmd->cmd_pkt);
1679 }
1680 
1681 
1682 /*
1683  * destroy a SCSI packet -- called internally and by the transport
1684  */
1685 static void
sf_scsi_destroy_pkt(struct scsi_address * ap,struct scsi_pkt * pkt)1686 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1687 {
1688 	struct sf_pkt *cmd = PKT2CMD(pkt);
1689 	struct sf *sf = ADDR2SF(ap);
1690 	struct sf_target *target = ADDR2TARGET(ap);
1691 	struct fcal_packet	*fpkt = cmd->cmd_fp_pkt;
1692 
1693 
1694 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1695 		/* DMA was set up -- clean up */
1696 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1697 		cmd->cmd_flags ^= CFLAG_DMAVALID;
1698 	}
1699 
1700 	/* take this packet off the doubly-linked list */
1701 	mutex_enter(&target->sft_pkt_mutex);
1702 	cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1703 	cmd->cmd_forw->cmd_back = cmd->cmd_back;
1704 	mutex_exit(&target->sft_pkt_mutex);
1705 
1706 	fpkt->fcal_pkt_flags = 0;
1707 	/* free the packet */
1708 	if ((cmd->cmd_flags &
1709 	    (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1710 		/* just a regular packet */
1711 		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1712 		cmd->cmd_flags = CFLAG_FREE;
1713 		kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1714 	} else {
1715 		/* a packet with extra memory */
1716 		sf_pkt_destroy_extern(sf, cmd);
1717 	}
1718 }
1719 
1720 
1721 /*
1722  * called by transport to unbind DMA handle
1723  */
1724 /* ARGSUSED */
1725 static void
sf_scsi_dmafree(struct scsi_address * ap,struct scsi_pkt * pkt)1726 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1727 {
1728 	struct sf_pkt *cmd = PKT2CMD(pkt);
1729 
1730 
1731 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1732 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1733 		cmd->cmd_flags ^= CFLAG_DMAVALID;
1734 	}
1735 
1736 }
1737 
1738 
1739 /*
1740  * called by transport to synchronize CPU and I/O views of memory
1741  */
1742 /* ARGSUSED */
1743 static void
sf_scsi_sync_pkt(struct scsi_address * ap,struct scsi_pkt * pkt)1744 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1745 {
1746 	struct sf_pkt *cmd = PKT2CMD(pkt);
1747 
1748 
1749 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1750 		if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1751 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
1752 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1753 		    DDI_SUCCESS) {
1754 			cmn_err(CE_WARN, "sf: sync pkt failed");
1755 		}
1756 	}
1757 }
1758 
1759 
1760 /*
1761  * routine for reset notification setup, to register or cancel. -- called
1762  * by transport
1763  */
1764 static int
sf_scsi_reset_notify(struct scsi_address * ap,int flag,void (* callback)(caddr_t),caddr_t arg)1765 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1766     void (*callback)(caddr_t), caddr_t arg)
1767 {
1768 	struct sf	*sf = ADDR2SF(ap);
1769 
1770 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1771 	    &sf->sf_mutex, &sf->sf_reset_notify_listf));
1772 }
1773 
1774 
1775 /*
1776  * called by transport to get port WWN property (except sun4u)
1777  */
1778 /* ARGSUSED */
1779 static int
sf_scsi_get_name(struct scsi_device * sd,char * name,int len)1780 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1781 {
1782 	char tbuf[(FC_WWN_SIZE*2)+1];
1783 	unsigned char wwn[FC_WWN_SIZE];
1784 	int i, lun;
1785 	dev_info_t *tgt_dip;
1786 
1787 	tgt_dip = sd->sd_dev;
1788 	i = sizeof (wwn);
1789 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1790 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1791 	    (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1792 		name[0] = '\0';
1793 		return (0);
1794 	}
1795 	i = sizeof (lun);
1796 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1797 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1798 	    (caddr_t)&lun, &i) != DDI_SUCCESS) {
1799 		name[0] = '\0';
1800 		return (0);
1801 	}
1802 	for (i = 0; i < FC_WWN_SIZE; i++)
1803 		(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1804 	(void) sprintf(name, "w%s,%x", tbuf, lun);
1805 	return (1);
1806 }
1807 
1808 
1809 /*
1810  * called by transport to get target soft AL-PA (except sun4u)
1811  */
1812 /* ARGSUSED */
1813 static int
sf_scsi_get_bus_addr(struct scsi_device * sd,char * name,int len)1814 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1815 {
1816 	struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1817 
1818 	if (target == NULL)
1819 		return (0);
1820 
1821 	(void) sprintf(name, "%x", target->sft_al_pa);
1822 	return (1);
1823 }
1824 
1825 
1826 /*
1827  * add to the command/response buffer pool for this sf instance
1828  */
1829 static int
sf_add_cr_pool(struct sf * sf)1830 sf_add_cr_pool(struct sf *sf)
1831 {
1832 	int		cmd_buf_size;
1833 	size_t		real_cmd_buf_size;
1834 	int		rsp_buf_size;
1835 	size_t		real_rsp_buf_size;
1836 	uint_t		i, ccount;
1837 	struct sf_cr_pool	*ptr;
1838 	struct sf_cr_free_elem *cptr;
1839 	caddr_t	dptr, eptr;
1840 	ddi_dma_cookie_t	cmd_cookie;
1841 	ddi_dma_cookie_t	rsp_cookie;
1842 	int		cmd_bound = FALSE, rsp_bound = FALSE;
1843 
1844 
1845 	/* allocate room for the pool */
1846 	if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1847 	    NULL) {
1848 		return (DDI_FAILURE);
1849 	}
1850 
1851 	/* allocate a DMA handle for the command pool */
1852 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1853 	    DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1854 		goto fail;
1855 	}
1856 
1857 	/*
1858 	 * Get a piece of memory in which to put commands
1859 	 */
1860 	cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1861 	if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1862 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1863 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1864 	    &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1865 		goto fail;
1866 	}
1867 
1868 	/* bind the DMA handle to an address */
1869 	if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1870 	    ptr->cmd_base, real_cmd_buf_size,
1871 	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1872 	    NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1873 		goto fail;
1874 	}
1875 	cmd_bound = TRUE;
1876 	/* ensure only one cookie was allocated */
1877 	if (ccount != 1) {
1878 		goto fail;
1879 	}
1880 
1881 	/* allocate a DMA handle for the response pool */
1882 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1883 	    DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1884 		goto fail;
1885 	}
1886 
1887 	/*
1888 	 * Get a piece of memory in which to put responses
1889 	 */
1890 	rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1891 	if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1892 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1893 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1894 	    &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1895 		goto fail;
1896 	}
1897 
1898 	/* bind the DMA handle to an address */
1899 	if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1900 	    ptr->rsp_base, real_rsp_buf_size,
1901 	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1902 	    NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1903 		goto fail;
1904 	}
1905 	rsp_bound = TRUE;
1906 	/* ensure only one cookie was allocated */
1907 	if (ccount != 1) {
1908 		goto fail;
1909 	}
1910 
1911 	/*
1912 	 * Generate a (cmd/rsp structure) free list
1913 	 */
1914 	/* ensure ptr points to start of long word (8-byte block) */
1915 	dptr = (caddr_t)((uintptr_t)(ptr->cmd_base) + 7 & ~7);
1916 	/* keep track of actual size after moving pointer */
1917 	real_cmd_buf_size -= (dptr - ptr->cmd_base);
1918 	eptr = ptr->rsp_base;
1919 
1920 	/* set actual total number of entries */
1921 	ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1922 	    (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1923 	ptr->nfree = ptr->ntot;
1924 	ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1925 	ptr->sf = sf;
1926 
1927 	/* set up DMA for each pair of entries */
1928 	i = 0;
1929 	while (i < ptr->ntot) {
1930 		cptr = (struct sf_cr_free_elem *)dptr;
1931 		dptr += sizeof (struct fcp_cmd);
1932 
1933 		cptr->next = (struct sf_cr_free_elem *)dptr;
1934 		cptr->rsp = eptr;
1935 
1936 		cptr->cmd_dmac = cmd_cookie.dmac_address +
1937 		    (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1938 
1939 		cptr->rsp_dmac = rsp_cookie.dmac_address +
1940 		    (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1941 
1942 		eptr += FCP_MAX_RSP_IU_SIZE;
1943 		i++;
1944 	}
1945 
1946 	/* terminate the list */
1947 	cptr->next = NULL;
1948 
1949 	/* add this list at front of current one */
1950 	mutex_enter(&sf->sf_cr_mutex);
1951 	ptr->next = sf->sf_cr_pool;
1952 	sf->sf_cr_pool = ptr;
1953 	sf->sf_cr_pool_cnt++;
1954 	mutex_exit(&sf->sf_cr_mutex);
1955 
1956 	return (DDI_SUCCESS);
1957 
1958 fail:
1959 	/* we failed so clean up */
1960 	if (ptr->cmd_dma_handle != NULL) {
1961 		if (cmd_bound) {
1962 			(void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1963 		}
1964 		ddi_dma_free_handle(&ptr->cmd_dma_handle);
1965 	}
1966 
1967 	if (ptr->rsp_dma_handle != NULL) {
1968 		if (rsp_bound) {
1969 			(void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1970 		}
1971 		ddi_dma_free_handle(&ptr->rsp_dma_handle);
1972 	}
1973 
1974 	if (ptr->cmd_base != NULL) {
1975 		ddi_dma_mem_free(&ptr->cmd_acc_handle);
1976 	}
1977 
1978 	if (ptr->rsp_base != NULL) {
1979 		ddi_dma_mem_free(&ptr->rsp_acc_handle);
1980 	}
1981 
1982 	kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1983 	return (DDI_FAILURE);
1984 }
1985 
1986 
1987 /*
1988  * allocate a command/response buffer from the pool, allocating more
1989  * in the pool as needed
1990  */
1991 static int
sf_cr_alloc(struct sf * sf,struct sf_pkt * cmd,int (* func)())1992 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1993 {
1994 	struct sf_cr_pool *ptr;
1995 	struct sf_cr_free_elem *cptr;
1996 
1997 
1998 	mutex_enter(&sf->sf_cr_mutex);
1999 
2000 try_again:
2001 
2002 	/* find a free buffer in the existing pool */
2003 	ptr = sf->sf_cr_pool;
2004 	while (ptr != NULL) {
2005 		if (ptr->nfree != 0) {
2006 			ptr->nfree--;
2007 			break;
2008 		} else {
2009 			ptr = ptr->next;
2010 		}
2011 	}
2012 
2013 	/* did we find a free buffer ? */
2014 	if (ptr != NULL) {
2015 		/* we found a free buffer -- take it off the free list */
2016 		cptr = ptr->free;
2017 		ptr->free = cptr->next;
2018 		mutex_exit(&sf->sf_cr_mutex);
2019 		/* set up the command to use the buffer pair */
2020 		cmd->cmd_block = (struct fcp_cmd *)cptr;
2021 		cmd->cmd_dmac = cptr->cmd_dmac;
2022 		cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2023 		cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2024 		cmd->cmd_cr_pool = ptr;
2025 		return (DDI_SUCCESS);		/* success */
2026 	}
2027 
2028 	/* no free buffer available -- can we allocate more ? */
2029 	if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2030 		/* we need to allocate more buffer pairs */
2031 		if (sf->sf_cr_flag) {
2032 			/* somebody already allocating for this instance */
2033 			if (func == SLEEP_FUNC) {
2034 				/* user wants to wait */
2035 				cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2036 				/* we've been woken so go try again */
2037 				goto try_again;
2038 			}
2039 			/* user does not want to wait */
2040 			mutex_exit(&sf->sf_cr_mutex);
2041 			sf->sf_stats.cralloc_failures++;
2042 			return (DDI_FAILURE);	/* give up */
2043 		}
2044 		/* set flag saying we're allocating */
2045 		sf->sf_cr_flag = 1;
2046 		mutex_exit(&sf->sf_cr_mutex);
2047 		/* add to our pool */
2048 		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2049 			/* couldn't add to our pool for some reason */
2050 			mutex_enter(&sf->sf_cr_mutex);
2051 			sf->sf_cr_flag = 0;
2052 			cv_broadcast(&sf->sf_cr_cv);
2053 			mutex_exit(&sf->sf_cr_mutex);
2054 			sf->sf_stats.cralloc_failures++;
2055 			return (DDI_FAILURE);	/* give up */
2056 		}
2057 		/*
2058 		 * clear flag saying we're allocating and tell all other
2059 		 * that care
2060 		 */
2061 		mutex_enter(&sf->sf_cr_mutex);
2062 		sf->sf_cr_flag = 0;
2063 		cv_broadcast(&sf->sf_cr_cv);
2064 		/* now that we have more buffers try again */
2065 		goto try_again;
2066 	}
2067 
2068 	/* we don't have room to allocate any more buffers */
2069 	mutex_exit(&sf->sf_cr_mutex);
2070 	sf->sf_stats.cralloc_failures++;
2071 	return (DDI_FAILURE);			/* give up */
2072 }
2073 
2074 
2075 /*
2076  * free a cmd/response buffer pair in our pool
2077  */
2078 static void
sf_cr_free(struct sf_cr_pool * cp,struct sf_pkt * cmd)2079 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2080 {
2081 	struct sf *sf = cp->sf;
2082 	struct sf_cr_free_elem *elem;
2083 
2084 	elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2085 	elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2086 	elem->cmd_dmac = cmd->cmd_dmac;
2087 	elem->rsp_dmac = cmd->cmd_rsp_dmac;
2088 
2089 	mutex_enter(&sf->sf_cr_mutex);
2090 	cp->nfree++;
2091 	ASSERT(cp->nfree <= cp->ntot);
2092 
2093 	elem->next = cp->free;
2094 	cp->free = elem;
2095 	mutex_exit(&sf->sf_cr_mutex);
2096 }
2097 
2098 
2099 /*
2100  * free our pool of cmd/response buffers
2101  */
2102 static void
sf_crpool_free(struct sf * sf)2103 sf_crpool_free(struct sf *sf)
2104 {
2105 	struct sf_cr_pool *cp, *prev;
2106 
2107 	prev = NULL;
2108 	mutex_enter(&sf->sf_cr_mutex);
2109 	cp = sf->sf_cr_pool;
2110 	while (cp != NULL) {
2111 		if (cp->nfree == cp->ntot) {
2112 			if (prev != NULL) {
2113 				prev->next = cp->next;
2114 			} else {
2115 				sf->sf_cr_pool = cp->next;
2116 			}
2117 			sf->sf_cr_pool_cnt--;
2118 			mutex_exit(&sf->sf_cr_mutex);
2119 
2120 			(void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2121 			ddi_dma_free_handle(&cp->cmd_dma_handle);
2122 			(void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2123 			ddi_dma_free_handle(&cp->rsp_dma_handle);
2124 			ddi_dma_mem_free(&cp->cmd_acc_handle);
2125 			ddi_dma_mem_free(&cp->rsp_acc_handle);
2126 			kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2127 			return;
2128 		}
2129 		prev = cp;
2130 		cp = cp->next;
2131 	}
2132 	mutex_exit(&sf->sf_cr_mutex);
2133 }
2134 
2135 
2136 /* ARGSUSED */
2137 static int
sf_kmem_cache_constructor(void * buf,void * arg,int size)2138 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2139 {
2140 	struct sf_pkt *cmd = buf;
2141 
2142 	mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2143 	cmd->cmd_block = NULL;
2144 	cmd->cmd_dmahandle = NULL;
2145 	return (0);
2146 }
2147 
2148 
2149 /* ARGSUSED */
2150 static void
sf_kmem_cache_destructor(void * buf,void * size)2151 sf_kmem_cache_destructor(void *buf, void *size)
2152 {
2153 	struct sf_pkt *cmd = buf;
2154 
2155 	if (cmd->cmd_dmahandle != NULL) {
2156 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
2157 	}
2158 
2159 	if (cmd->cmd_block != NULL) {
2160 		sf_cr_free(cmd->cmd_cr_pool, cmd);
2161 	}
2162 	mutex_destroy(&cmd->cmd_abort_mutex);
2163 }
2164 
2165 
2166 /*
2167  * called by transport when a state change occurs
2168  */
2169 static void
sf_statec_callback(void * arg,int msg)2170 sf_statec_callback(void *arg, int msg)
2171 {
2172 	struct sf *sf = (struct sf *)arg;
2173 	struct sf_target	*target;
2174 	int i;
2175 	struct sf_pkt *cmd;
2176 	struct scsi_pkt *pkt;
2177 
2178 
2179 
2180 	switch (msg) {
2181 
2182 	case FCAL_STATUS_LOOP_ONLINE: {
2183 		uchar_t		al_pa;		/* to save AL-PA */
2184 		int		ret;		/* ret value from getmap */
2185 		int		lip_cnt;	/* to save current count */
2186 		int		cnt;		/* map length */
2187 
2188 		/*
2189 		 * the loop has gone online
2190 		 */
2191 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2192 		    ddi_get_instance(sf->sf_dip)));
2193 		mutex_enter(&sf->sf_mutex);
2194 		sf->sf_lip_cnt++;
2195 		sf->sf_state = SF_STATE_ONLINING;
2196 		mutex_exit(&sf->sf_mutex);
2197 
2198 		/* scan each target hash queue */
2199 		for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2200 			target = sf->sf_wwn_lists[i];
2201 			while (target != NULL) {
2202 				/*
2203 				 * foreach target, if it's not offline then
2204 				 * mark it as busy
2205 				 */
2206 				mutex_enter(&target->sft_mutex);
2207 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2208 					target->sft_state |= (SF_TARGET_BUSY
2209 					    | SF_TARGET_MARK);
2210 #ifdef DEBUG
2211 				/*
2212 				 * for debugging, print out info on any
2213 				 * pending commands (left hanging)
2214 				 */
2215 				cmd = target->sft_pkt_head;
2216 				while (cmd != (struct sf_pkt *)&target->
2217 				    sft_pkt_head) {
2218 					if (cmd->cmd_state ==
2219 					    SF_STATE_ISSUED) {
2220 						SF_DEBUG(1, (sf, CE_CONT,
2221 						    "cmd 0x%p pending "
2222 						    "after lip\n",
2223 						    (void *)cmd->cmd_fp_pkt));
2224 					}
2225 					cmd = cmd->cmd_forw;
2226 				}
2227 #endif
2228 				mutex_exit(&target->sft_mutex);
2229 				target = target->sft_next;
2230 			}
2231 		}
2232 
2233 		/*
2234 		 * since the loop has just gone online get a new map from
2235 		 * the transport
2236 		 */
2237 		if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2238 		    sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2239 		    sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2240 			if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2241 				(void) soc_take_core(sf->sf_sochandle,
2242 				    sf->sf_socp);
2243 				sf_core = 0;
2244 			}
2245 			sf_log(sf, CE_WARN,
2246 			    "!soc lilp map failed status=0x%x\n", ret);
2247 			mutex_enter(&sf->sf_mutex);
2248 			sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2249 			sf->sf_lip_cnt++;
2250 			sf->sf_state = SF_STATE_OFFLINE;
2251 			mutex_exit(&sf->sf_mutex);
2252 			return;
2253 		}
2254 
2255 		/* ensure consistent view of DMA memory */
2256 		(void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2257 		    DDI_DMA_SYNC_FORKERNEL);
2258 
2259 		/* how many entries in map ? */
2260 		cnt = sf->sf_lilp_map->lilp_length;
2261 		if (cnt >= SF_MAX_LILP_ENTRIES) {
2262 			sf_log(sf, CE_WARN, "invalid lilp map\n");
2263 			return;
2264 		}
2265 
2266 		mutex_enter(&sf->sf_mutex);
2267 		sf->sf_device_count = cnt - 1;
2268 		sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2269 		lip_cnt = sf->sf_lip_cnt;
2270 		al_pa = sf->sf_al_pa;
2271 
2272 		SF_DEBUG(1, (sf, CE_CONT,
2273 		    "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2274 
2275 		/*
2276 		 * since the last entry of the map may be mine (common) check
2277 		 * for that, and if it is we have one less entry to look at
2278 		 */
2279 		if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2280 			cnt--;
2281 		}
2282 		/* If we didn't get a valid loop map enable all targets */
2283 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2284 			for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2285 				sf->sf_lilp_map->lilp_alpalist[i] =
2286 				    sf_switch_to_alpa[i];
2287 			cnt = i;
2288 			sf->sf_device_count = cnt - 1;
2289 		}
2290 		if (sf->sf_device_count == 0) {
2291 			sf_finish_init(sf, lip_cnt);
2292 			mutex_exit(&sf->sf_mutex);
2293 			break;
2294 		}
2295 		mutex_exit(&sf->sf_mutex);
2296 
2297 		SF_DEBUG(2, (sf, CE_WARN,
2298 		    "!statec_callback: starting with %d targets\n",
2299 		    sf->sf_device_count));
2300 
2301 		/* scan loop map, logging into all ports (except mine) */
2302 		for (i = 0; i < cnt; i++) {
2303 			SF_DEBUG(1, (sf, CE_CONT,
2304 			    "!lilp map entry %d = %x,%x\n", i,
2305 			    sf->sf_lilp_map->lilp_alpalist[i],
2306 			    sf_alpa_to_switch[
2307 			    sf->sf_lilp_map->lilp_alpalist[i]]));
2308 			/* is this entry for somebody else ? */
2309 			if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2310 				/* do a PLOGI to this port */
2311 				if (!sf_login(sf, LA_ELS_PLOGI,
2312 				    sf->sf_lilp_map->lilp_alpalist[i],
2313 				    sf->sf_lilp_map->lilp_alpalist[cnt-1],
2314 				    lip_cnt)) {
2315 					/* a problem logging in */
2316 					mutex_enter(&sf->sf_mutex);
2317 					if (lip_cnt == sf->sf_lip_cnt) {
2318 						/*
2319 						 * problem not from a new LIP
2320 						 */
2321 						sf->sf_device_count--;
2322 						ASSERT(sf->sf_device_count
2323 						    >= 0);
2324 						if (sf->sf_device_count == 0) {
2325 							sf_finish_init(sf,
2326 							    lip_cnt);
2327 						}
2328 					}
2329 					mutex_exit(&sf->sf_mutex);
2330 				}
2331 			}
2332 		}
2333 		break;
2334 	}
2335 
2336 	case FCAL_STATUS_ERR_OFFLINE:
2337 		/*
2338 		 * loop has gone offline due to an error
2339 		 */
2340 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2341 		    ddi_get_instance(sf->sf_dip)));
2342 		mutex_enter(&sf->sf_mutex);
2343 		sf->sf_lip_cnt++;
2344 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2345 		if (!sf->sf_online_timer) {
2346 			sf->sf_online_timer = sf_watchdog_time +
2347 			    SF_ONLINE_TIMEOUT;
2348 		}
2349 		/*
2350 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2351 		 * since throttling logic in sf_watch() depends on
2352 		 * preservation of this flag while device is suspended
2353 		 */
2354 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2355 			sf->sf_state |= SF_STATE_OFFLINE;
2356 			SF_DEBUG(1, (sf, CE_CONT,
2357 			    "sf_statec_callback, sf%d: "
2358 			    "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2359 			    ddi_get_instance(sf->sf_dip)));
2360 		} else {
2361 			sf->sf_state = SF_STATE_OFFLINE;
2362 		}
2363 
2364 		/* scan each possible target on the loop */
2365 		for (i = 0; i < sf_max_targets; i++) {
2366 			target = sf->sf_targets[i];
2367 			while (target != NULL) {
2368 				mutex_enter(&target->sft_mutex);
2369 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2370 					target->sft_state |= (SF_TARGET_BUSY
2371 					    | SF_TARGET_MARK);
2372 				mutex_exit(&target->sft_mutex);
2373 				target = target->sft_next_lun;
2374 			}
2375 		}
2376 		mutex_exit(&sf->sf_mutex);
2377 		break;
2378 
2379 	case FCAL_STATE_RESET: {
2380 		struct sf_els_hdr	*privp;	/* ptr to private list */
2381 		struct sf_els_hdr	*tmpp1;	/* tmp prev hdr ptr */
2382 		struct sf_els_hdr	*tmpp2;	/* tmp next hdr ptr */
2383 		struct sf_els_hdr	*head;	/* to save our private list */
2384 		struct fcal_packet	*fpkt;	/* ptr to pkt in hdr */
2385 
2386 		/*
2387 		 * a transport reset
2388 		 */
2389 		SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2390 		    ddi_get_instance(sf->sf_dip)));
2391 		tmpp1 = head = NULL;
2392 		mutex_enter(&sf->sf_mutex);
2393 		sf->sf_lip_cnt++;
2394 		sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2395 		/*
2396 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2397 		 * since throttling logic in sf_watch() depends on
2398 		 * preservation of this flag while device is suspended
2399 		 */
2400 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2401 			sf->sf_state |= SF_STATE_OFFLINE;
2402 			SF_DEBUG(1, (sf, CE_CONT,
2403 			    "sf_statec_callback, sf%d: "
2404 			    "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2405 			    ddi_get_instance(sf->sf_dip)));
2406 		} else {
2407 			sf->sf_state = SF_STATE_OFFLINE;
2408 		}
2409 
2410 		/*
2411 		 * scan each possible target on the loop, looking for targets
2412 		 * that need callbacks ran
2413 		 */
2414 		for (i = 0; i < sf_max_targets; i++) {
2415 			target = sf->sf_targets[i];
2416 			while (target != NULL) {
2417 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2418 					target->sft_state |= (SF_TARGET_BUSY
2419 					    | SF_TARGET_MARK);
2420 					mutex_exit(&sf->sf_mutex);
2421 					/*
2422 					 * run remove event callbacks for lun
2423 					 *
2424 					 * We have a nasty race condition here
2425 					 * 'cause we're dropping this mutex to
2426 					 * run the callback and expect the
2427 					 * linked list to be the same.
2428 					 */
2429 					(void) ndi_event_retrieve_cookie(
2430 					    sf->sf_event_hdl, target->sft_dip,
2431 					    FCAL_REMOVE_EVENT, &sf_remove_eid,
2432 					    NDI_EVENT_NOPASS);
2433 					(void) ndi_event_run_callbacks(
2434 					    sf->sf_event_hdl,
2435 					    target->sft_dip,
2436 					    sf_remove_eid, NULL);
2437 					mutex_enter(&sf->sf_mutex);
2438 				}
2439 				target = target->sft_next_lun;
2440 			}
2441 		}
2442 
2443 		/*
2444 		 * scan for ELS commands that are in transport, not complete,
2445 		 * and have a valid timeout, building a private list
2446 		 */
2447 		privp = sf->sf_els_list;
2448 		while (privp != NULL) {
2449 			fpkt = privp->fpkt;
2450 			if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2451 			    (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2452 			    (privp->timeout != SF_INVALID_TIMEOUT)) {
2453 				/*
2454 				 * cmd in transport && not complete &&
2455 				 * timeout valid
2456 				 *
2457 				 * move this entry from ELS input list to our
2458 				 * private list
2459 				 */
2460 
2461 				tmpp2 = privp->next; /* save ptr to next */
2462 
2463 				/* push this on private list head */
2464 				privp->next = head;
2465 				head = privp;
2466 
2467 				/* remove this entry from input list */
2468 				if (tmpp1 != NULL) {
2469 					/*
2470 					 * remove this entry from somewhere in
2471 					 * the middle of the list
2472 					 */
2473 					tmpp1->next = tmpp2;
2474 					if (tmpp2 != NULL) {
2475 						tmpp2->prev = tmpp1;
2476 					}
2477 				} else {
2478 					/*
2479 					 * remove this entry from the head
2480 					 * of the list
2481 					 */
2482 					sf->sf_els_list = tmpp2;
2483 					if (tmpp2 != NULL) {
2484 						tmpp2->prev = NULL;
2485 					}
2486 				}
2487 				privp = tmpp2;	/* skip to next entry */
2488 			} else {
2489 				tmpp1 = privp;	/* save ptr to prev entry */
2490 				privp = privp->next; /* skip to next entry */
2491 			}
2492 		}
2493 
2494 		mutex_exit(&sf->sf_mutex);
2495 
2496 		/*
2497 		 * foreach cmd in our list free the ELS packet associated
2498 		 * with it
2499 		 */
2500 		privp = head;
2501 		while (privp != NULL) {
2502 			fpkt = privp->fpkt;
2503 			privp = privp->next;
2504 			sf_els_free(fpkt);
2505 		}
2506 
2507 		/*
2508 		 * scan for commands from each possible target
2509 		 */
2510 		for (i = 0; i < sf_max_targets; i++) {
2511 			target = sf->sf_targets[i];
2512 			while (target != NULL) {
2513 				/*
2514 				 * scan all active commands for this target,
2515 				 * looking for commands that have been issued,
2516 				 * are in transport, and are not yet complete
2517 				 * (so we can terminate them because of the
2518 				 * reset)
2519 				 */
2520 				mutex_enter(&target->sft_pkt_mutex);
2521 				cmd = target->sft_pkt_head;
2522 				while (cmd != (struct sf_pkt *)&target->
2523 				    sft_pkt_head) {
2524 					fpkt = cmd->cmd_fp_pkt;
2525 					mutex_enter(&cmd->cmd_abort_mutex);
2526 					if ((cmd->cmd_state ==
2527 					    SF_STATE_ISSUED) &&
2528 					    (fpkt->fcal_cmd_state &
2529 					    FCAL_CMD_IN_TRANSPORT) &&
2530 					    (!(fpkt->fcal_cmd_state &
2531 					    FCAL_CMD_COMPLETE))) {
2532 						/* a command to be reset */
2533 						pkt = cmd->cmd_pkt;
2534 						pkt->pkt_reason = CMD_RESET;
2535 						pkt->pkt_statistics |=
2536 						    STAT_BUS_RESET;
2537 						cmd->cmd_state = SF_STATE_IDLE;
2538 						mutex_exit(&cmd->
2539 						    cmd_abort_mutex);
2540 						mutex_exit(&target->
2541 						    sft_pkt_mutex);
2542 						if (pkt->pkt_comp != NULL) {
2543 							(*pkt->pkt_comp)(pkt);
2544 						}
2545 						mutex_enter(&target->
2546 						    sft_pkt_mutex);
2547 						cmd = target->sft_pkt_head;
2548 					} else {
2549 						mutex_exit(&cmd->
2550 						    cmd_abort_mutex);
2551 						/* get next command */
2552 						cmd = cmd->cmd_forw;
2553 					}
2554 				}
2555 				mutex_exit(&target->sft_pkt_mutex);
2556 				target = target->sft_next_lun;
2557 			}
2558 		}
2559 
2560 		/*
2561 		 * get packet queue for this target, resetting all remaining
2562 		 * commands
2563 		 */
2564 		mutex_enter(&sf->sf_mutex);
2565 		cmd = sf->sf_pkt_head;
2566 		sf->sf_pkt_head = NULL;
2567 		mutex_exit(&sf->sf_mutex);
2568 
2569 		while (cmd != NULL) {
2570 			pkt = cmd->cmd_pkt;
2571 			cmd = cmd->cmd_next;
2572 			pkt->pkt_reason = CMD_RESET;
2573 			pkt->pkt_statistics |= STAT_BUS_RESET;
2574 			if (pkt->pkt_comp != NULL) {
2575 				(*pkt->pkt_comp)(pkt);
2576 			}
2577 		}
2578 		break;
2579 	}
2580 
2581 	default:
2582 		break;
2583 	}
2584 }
2585 
2586 
2587 /*
2588  * called to send a PLOGI (N_port login) ELS request to a destination ID,
2589  * returning TRUE upon success, else returning FALSE
2590  */
2591 static int
sf_login(struct sf * sf,uchar_t els_code,uchar_t dest_id,uint_t arg1,int lip_cnt)2592 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2593     int lip_cnt)
2594 {
2595 	struct la_els_logi	*logi;
2596 	struct	sf_els_hdr	*privp;
2597 
2598 
2599 	if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2600 	    sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2601 	    (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2602 		sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2603 		    "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2604 		return (FALSE);
2605 	}
2606 
2607 	privp->lip_cnt = lip_cnt;
2608 	if (els_code == LA_ELS_PLOGI) {
2609 		bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2610 		    (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2611 		    - 4);
2612 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2613 		    (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2614 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2615 		    (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2616 		bzero((caddr_t)&logi->reserved, 16);
2617 	} else if (els_code == LA_ELS_LOGO) {
2618 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2619 		    (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2620 		((struct la_els_logo	*)logi)->reserved = 0;
2621 		((struct la_els_logo	*)logi)->nport_id[0] = 0;
2622 		((struct la_els_logo	*)logi)->nport_id[1] = 0;
2623 		((struct la_els_logo	*)logi)->nport_id[2] = arg1;
2624 	}
2625 
2626 	privp->els_code = els_code;
2627 	logi->ls_code = els_code;
2628 	logi->mbz[0] = 0;
2629 	logi->mbz[1] = 0;
2630 	logi->mbz[2] = 0;
2631 
2632 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2633 	return (sf_els_transport(sf, privp));
2634 }
2635 
2636 
2637 /*
2638  * send an ELS IU via the transport,
2639  * returning TRUE upon success, else returning FALSE
2640  */
2641 static int
sf_els_transport(struct sf * sf,struct sf_els_hdr * privp)2642 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2643 {
2644 	struct fcal_packet *fpkt = privp->fpkt;
2645 
2646 
2647 	(void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2648 	    DDI_DMA_SYNC_FORDEV);
2649 	privp->prev = NULL;
2650 	mutex_enter(&sf->sf_mutex);
2651 	privp->next = sf->sf_els_list;
2652 	if (sf->sf_els_list != NULL) {
2653 		sf->sf_els_list->prev = privp;
2654 	}
2655 	sf->sf_els_list = privp;
2656 	mutex_exit(&sf->sf_mutex);
2657 
2658 	/* call the transport to send a packet */
2659 	if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2660 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2661 		mutex_enter(&sf->sf_mutex);
2662 		if (privp->prev != NULL) {
2663 			privp->prev->next = privp->next;
2664 		}
2665 		if (privp->next != NULL) {
2666 			privp->next->prev = privp->prev;
2667 		}
2668 		if (sf->sf_els_list == privp) {
2669 			sf->sf_els_list = privp->next;
2670 		}
2671 		mutex_exit(&sf->sf_mutex);
2672 		sf_els_free(fpkt);
2673 		return (FALSE);			/* failure */
2674 	}
2675 	return (TRUE);				/* success */
2676 }
2677 
2678 
2679 /*
2680  * called as the pkt_comp routine for ELS FC packets
2681  */
2682 static void
sf_els_callback(struct fcal_packet * fpkt)2683 sf_els_callback(struct fcal_packet *fpkt)
2684 {
2685 	struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2686 	struct sf *sf = privp->sf;
2687 	struct sf *tsf;
2688 	int tgt_id;
2689 	struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2690 	struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2691 	struct	sf_target *target;
2692 	short	ncmds;
2693 	short	free_pkt = TRUE;
2694 
2695 
2696 	/*
2697 	 * we've received an ELS callback, i.e. an ELS packet has arrived
2698 	 */
2699 
2700 	/* take the current packet off of the queue */
2701 	mutex_enter(&sf->sf_mutex);
2702 	if (privp->timeout == SF_INVALID_TIMEOUT) {
2703 		mutex_exit(&sf->sf_mutex);
2704 		return;
2705 	}
2706 	if (privp->prev != NULL) {
2707 		privp->prev->next = privp->next;
2708 	}
2709 	if (privp->next != NULL) {
2710 		privp->next->prev = privp->prev;
2711 	}
2712 	if (sf->sf_els_list == privp) {
2713 		sf->sf_els_list = privp->next;
2714 	}
2715 	privp->prev = privp->next = NULL;
2716 	mutex_exit(&sf->sf_mutex);
2717 
2718 	/* get # pkts in this callback */
2719 	ncmds = fpkt->fcal_ncmds;
2720 	ASSERT(ncmds >= 0);
2721 	mutex_enter(&sf->sf_cmd_mutex);
2722 	sf->sf_ncmds = ncmds;
2723 	mutex_exit(&sf->sf_cmd_mutex);
2724 
2725 	/* sync idea of memory */
2726 	(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2727 	    DDI_DMA_SYNC_FORKERNEL);
2728 
2729 	/* was this an OK ACC msg ?? */
2730 	if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2731 	    (ptr->ls_code == LA_ELS_ACC)) {
2732 
2733 		/*
2734 		 * this was an OK ACC pkt
2735 		 */
2736 
2737 		switch (privp->els_code) {
2738 		case LA_ELS_PLOGI:
2739 			/*
2740 			 * was able to to an N_port login
2741 			 */
2742 			SF_DEBUG(2, (sf, CE_CONT,
2743 			    "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2744 			    privp->dest_nport_id,
2745 			    *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2746 			    *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2747 			/* try to do a process login */
2748 			if (!sf_do_prli(sf, privp, ptr)) {
2749 				free_pkt = FALSE;
2750 				goto fail;	/* PRLI failed */
2751 			}
2752 			break;
2753 		case LA_ELS_PRLI:
2754 			/*
2755 			 * was able to do a process login
2756 			 */
2757 			SF_DEBUG(2, (sf, CE_CONT,
2758 			    "!PRLI to al_pa %x succeeded\n",
2759 			    privp->dest_nport_id));
2760 			/* try to do address discovery */
2761 			if (sf_do_adisc(sf, privp) != 1) {
2762 				free_pkt = FALSE;
2763 				goto fail;	/* ADISC failed */
2764 			}
2765 			break;
2766 		case LA_ELS_ADISC:
2767 			/*
2768 			 * found a target via ADISC
2769 			 */
2770 
2771 			SF_DEBUG(2, (sf, CE_CONT,
2772 			    "!ADISC to al_pa %x succeeded\n",
2773 			    privp->dest_nport_id));
2774 
2775 			/* create the target info */
2776 			if ((target = sf_create_target(sf, privp,
2777 			    sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2778 			    (int64_t)0))
2779 			    == NULL) {
2780 				goto fail;	/* can't create target */
2781 			}
2782 
2783 			/*
2784 			 * ensure address discovered matches what we thought
2785 			 * it would be
2786 			 */
2787 			if ((uchar_t)adisc->hard_address !=
2788 			    privp->dest_nport_id) {
2789 				sf_log(sf, CE_WARN,
2790 				    "target 0x%x, AL-PA 0x%x and "
2791 				    "hard address 0x%x don't match\n",
2792 				    sf_alpa_to_switch[
2793 				    (uchar_t)privp->dest_nport_id],
2794 				    privp->dest_nport_id,
2795 				    (uchar_t)adisc->hard_address);
2796 				mutex_enter(&sf->sf_mutex);
2797 				sf_offline_target(sf, target);
2798 				mutex_exit(&sf->sf_mutex);
2799 				goto fail;	/* addr doesn't match */
2800 			}
2801 			/*
2802 			 * get inquiry data from the target
2803 			 */
2804 			if (!sf_do_reportlun(sf, privp, target)) {
2805 				mutex_enter(&sf->sf_mutex);
2806 				sf_offline_target(sf, target);
2807 				mutex_exit(&sf->sf_mutex);
2808 				free_pkt = FALSE;
2809 				goto fail;	/* inquiry failed */
2810 			}
2811 			break;
2812 		default:
2813 			SF_DEBUG(2, (sf, CE_CONT,
2814 			    "!ELS %x to al_pa %x succeeded\n",
2815 			    privp->els_code, privp->dest_nport_id));
2816 			sf_els_free(fpkt);
2817 			break;
2818 		}
2819 
2820 	} else {
2821 
2822 		/*
2823 		 * oh oh -- this was not an OK ACC packet
2824 		 */
2825 
2826 		/* get target ID from dest loop address */
2827 		tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2828 
2829 		/* keep track of failures */
2830 		sf->sf_stats.tstats[tgt_id].els_failures++;
2831 		if (++(privp->retries) < sf_els_retries &&
2832 		    fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2833 			if (fpkt->fcal_pkt_status ==
2834 			    FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2835 				tsf = sf->sf_sibling;
2836 				if (tsf != NULL) {
2837 					mutex_enter(&tsf->sf_cmd_mutex);
2838 					tsf->sf_flag = 1;
2839 					tsf->sf_throttle = SF_DECR_DELTA;
2840 					mutex_exit(&tsf->sf_cmd_mutex);
2841 				}
2842 			}
2843 			privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2844 			privp->prev = NULL;
2845 
2846 			mutex_enter(&sf->sf_mutex);
2847 
2848 			if (privp->lip_cnt == sf->sf_lip_cnt) {
2849 				SF_DEBUG(1, (sf, CE_WARN,
2850 				    "!ELS %x to al_pa %x failed, retrying",
2851 				    privp->els_code, privp->dest_nport_id));
2852 				privp->next = sf->sf_els_list;
2853 				if (sf->sf_els_list != NULL) {
2854 					sf->sf_els_list->prev = privp;
2855 				}
2856 
2857 				sf->sf_els_list = privp;
2858 
2859 				mutex_exit(&sf->sf_mutex);
2860 				/* device busy?  wait a bit ... */
2861 				if (fpkt->fcal_pkt_status ==
2862 				    FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2863 					privp->delayed_retry = 1;
2864 					return;
2865 				}
2866 				/* call the transport to send a pkt */
2867 				if (soc_transport(sf->sf_sochandle, fpkt,
2868 				    FCAL_NOSLEEP, CQ_REQUEST_1) !=
2869 				    FCAL_TRANSPORT_SUCCESS) {
2870 					mutex_enter(&sf->sf_mutex);
2871 					if (privp->prev != NULL) {
2872 						privp->prev->next =
2873 						    privp->next;
2874 					}
2875 					if (privp->next != NULL) {
2876 						privp->next->prev =
2877 						    privp->prev;
2878 					}
2879 					if (sf->sf_els_list == privp) {
2880 						sf->sf_els_list = privp->next;
2881 					}
2882 					mutex_exit(&sf->sf_mutex);
2883 					goto fail;
2884 				} else
2885 					return;
2886 			} else {
2887 				mutex_exit(&sf->sf_mutex);
2888 				goto fail;
2889 			}
2890 		} else {
2891 #ifdef	DEBUG
2892 			if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2893 			SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2894 			    privp->els_code, privp->dest_nport_id));
2895 			if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2896 				SF_DEBUG(2, (sf, CE_NOTE,
2897 				    "els reply code = %x", ptr->ls_code));
2898 				if (ptr->ls_code == LA_ELS_RJT)
2899 					SF_DEBUG(1, (sf, CE_CONT,
2900 					    "LS_RJT reason = %x\n",
2901 					    *(((uint_t *)ptr) + 1)));
2902 			} else
2903 				SF_DEBUG(2, (sf, CE_NOTE,
2904 				    "fc packet status = %x",
2905 				    fpkt->fcal_pkt_status));
2906 			}
2907 #endif
2908 			goto fail;
2909 		}
2910 	}
2911 	return;					/* success */
2912 fail:
2913 	mutex_enter(&sf->sf_mutex);
2914 	if (sf->sf_lip_cnt == privp->lip_cnt) {
2915 		sf->sf_device_count--;
2916 		ASSERT(sf->sf_device_count >= 0);
2917 		if (sf->sf_device_count == 0) {
2918 			sf_finish_init(sf, privp->lip_cnt);
2919 		}
2920 	}
2921 	mutex_exit(&sf->sf_mutex);
2922 	if (free_pkt) {
2923 		sf_els_free(fpkt);
2924 	}
2925 }
2926 
2927 
2928 /*
2929  * send a PRLI (process login) ELS IU via the transport,
2930  * returning TRUE upon success, else returning FALSE
2931  */
2932 static int
sf_do_prli(struct sf * sf,struct sf_els_hdr * privp,struct la_els_logi * ptr)2933 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2934 {
2935 	struct la_els_prli	*prli = (struct la_els_prli *)privp->cmd;
2936 	struct fcp_prli		*fprli;
2937 	struct  fcal_packet	*fpkt = privp->fpkt;
2938 
2939 
2940 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2941 	    sizeof (struct la_els_prli);
2942 	privp->els_code = LA_ELS_PRLI;
2943 	fprli = (struct fcp_prli *)prli->service_params;
2944 	prli->ls_code = LA_ELS_PRLI;
2945 	prli->page_length = 0x10;
2946 	prli->payload_length = sizeof (struct la_els_prli);
2947 	fprli->type = 0x08;			/* no define here? */
2948 	fprli->resvd1 = 0;
2949 	fprli->orig_process_assoc_valid = 0;
2950 	fprli->resp_process_assoc_valid = 0;
2951 	fprli->establish_image_pair = 1;
2952 	fprli->resvd2 = 0;
2953 	fprli->resvd3 = 0;
2954 	fprli->data_overlay_allowed = 0;
2955 	fprli->initiator_fn = 1;
2956 	fprli->target_fn = 0;
2957 	fprli->cmd_data_mixed = 0;
2958 	fprli->data_resp_mixed = 0;
2959 	fprli->read_xfer_rdy_disabled = 1;
2960 	fprli->write_xfer_rdy_disabled = 0;
2961 
2962 	bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2963 	    sizeof (privp->port_wwn));
2964 	bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2965 	    sizeof (privp->node_wwn));
2966 
2967 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2968 	return (sf_els_transport(sf, privp));
2969 }
2970 
2971 
2972 /*
2973  * send an ADISC (address discovery) ELS IU via the transport,
2974  * returning TRUE upon success, else returning FALSE
2975  */
2976 static int
sf_do_adisc(struct sf * sf,struct sf_els_hdr * privp)2977 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2978 {
2979 	struct la_els_adisc	*adisc = (struct la_els_adisc *)privp->cmd;
2980 	struct	fcal_packet	*fpkt = privp->fpkt;
2981 
2982 	privp->els_code = LA_ELS_ADISC;
2983 	adisc->ls_code = LA_ELS_ADISC;
2984 	adisc->mbz[0] = 0;
2985 	adisc->mbz[1] = 0;
2986 	adisc->mbz[2] = 0;
2987 	adisc->hard_address = 0; /* ??? */
2988 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2989 	    sizeof (struct la_els_adisc);
2990 	bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2991 	    (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2992 	bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2993 	    (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2994 	adisc->nport_id = sf->sf_al_pa;
2995 
2996 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2997 	return (sf_els_transport(sf, privp));
2998 }
2999 
3000 
3001 static struct fcal_packet *
sf_els_alloc(struct sf * sf,uchar_t dest_id,int priv_size,int cmd_size,int rsp_size,caddr_t * rprivp,caddr_t * cmd_buf)3002 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3003     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3004 {
3005 	struct	fcal_packet	*fpkt;
3006 	ddi_dma_cookie_t	pcookie;
3007 	ddi_dma_cookie_t	rcookie;
3008 	struct	sf_els_hdr	*privp;
3009 	ddi_dma_handle_t	cmd_dma_handle = NULL;
3010 	ddi_dma_handle_t	rsp_dma_handle = NULL;
3011 	ddi_acc_handle_t	cmd_acc_handle = NULL;
3012 	ddi_acc_handle_t	rsp_acc_handle = NULL;
3013 	size_t			real_size;
3014 	uint_t			ccount;
3015 	fc_frame_header_t	*hp;
3016 	int			cmd_bound = FALSE, rsp_bound = FALSE;
3017 	caddr_t			cmd = NULL;
3018 	caddr_t			rsp = NULL;
3019 
3020 	if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3021 	    sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3022 		SF_DEBUG(1, (sf, CE_WARN,
3023 			"Could not allocate fcal_packet for ELS\n"));
3024 		return (NULL);
3025 	}
3026 
3027 	if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3028 	    KM_NOSLEEP)) == NULL) {
3029 		SF_DEBUG(1, (sf, CE_WARN,
3030 		    "Could not allocate sf_els_hdr for ELS\n"));
3031 		goto fail;
3032 	}
3033 
3034 	privp->size = priv_size;
3035 	fpkt->fcal_pkt_private = (caddr_t)privp;
3036 
3037 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3038 	    DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3039 		SF_DEBUG(1, (sf, CE_WARN,
3040 		    "Could not allocate DMA handle for ELS\n"));
3041 		goto fail;
3042 	}
3043 
3044 	if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3045 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3046 	    DDI_DMA_DONTWAIT, NULL, &cmd,
3047 	    &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3048 		SF_DEBUG(1, (sf, CE_WARN,
3049 		    "Could not allocate DMA memory for ELS\n"));
3050 		goto fail;
3051 	}
3052 
3053 	if (real_size < cmd_size) {
3054 		SF_DEBUG(1, (sf, CE_WARN,
3055 		    "DMA memory too small for ELS\n"));
3056 		goto fail;
3057 	}
3058 
3059 	if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3060 	    cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3061 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3062 		SF_DEBUG(1, (sf, CE_WARN,
3063 		    "Could not bind DMA memory for ELS\n"));
3064 		goto fail;
3065 	}
3066 	cmd_bound = TRUE;
3067 
3068 	if (ccount != 1) {
3069 		SF_DEBUG(1, (sf, CE_WARN,
3070 		    "Wrong cookie count for ELS\n"));
3071 		goto fail;
3072 	}
3073 
3074 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3075 	    DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3076 		SF_DEBUG(1, (sf, CE_WARN,
3077 		    "Could not allocate DMA handle for ELS rsp\n"));
3078 		goto fail;
3079 	}
3080 	if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3081 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3082 	    DDI_DMA_DONTWAIT, NULL, &rsp,
3083 	    &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3084 		SF_DEBUG(1, (sf, CE_WARN,
3085 		    "Could not allocate DMA memory for ELS rsp\n"));
3086 		goto fail;
3087 	}
3088 
3089 	if (real_size < rsp_size) {
3090 		SF_DEBUG(1, (sf, CE_WARN,
3091 		    "DMA memory too small for ELS rsp\n"));
3092 		goto fail;
3093 	}
3094 
3095 	if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3096 	    rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3097 	    DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3098 		SF_DEBUG(1, (sf, CE_WARN,
3099 		    "Could not bind DMA memory for ELS rsp\n"));
3100 		goto fail;
3101 	}
3102 	rsp_bound = TRUE;
3103 
3104 	if (ccount != 1) {
3105 		SF_DEBUG(1, (sf, CE_WARN,
3106 		    "Wrong cookie count for ELS rsp\n"));
3107 		goto fail;
3108 	}
3109 
3110 	privp->cmd = cmd;
3111 	privp->sf = sf;
3112 	privp->cmd_dma_handle = cmd_dma_handle;
3113 	privp->cmd_acc_handle = cmd_acc_handle;
3114 	privp->rsp = rsp;
3115 	privp->rsp_dma_handle = rsp_dma_handle;
3116 	privp->rsp_acc_handle = rsp_acc_handle;
3117 	privp->dest_nport_id = dest_id;
3118 	privp->fpkt = fpkt;
3119 
3120 	fpkt->fcal_pkt_cookie = sf->sf_socp;
3121 	fpkt->fcal_pkt_comp = sf_els_callback;
3122 	fpkt->fcal_magic = FCALP_MAGIC;
3123 	fpkt->fcal_pkt_flags = 0;
3124 	fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3125 	    (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3126 	fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3127 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3128 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3129 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3130 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3131 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3132 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3133 	fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3134 	    pcookie.dmac_address;
3135 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3136 	fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3137 	    rcookie.dmac_address;
3138 	fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3139 
3140 	/* Fill in the Fabric Channel Header */
3141 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3142 	hp->r_ctl = R_CTL_ELS_REQ;
3143 	hp->d_id = dest_id;
3144 	hp->s_id = sf->sf_al_pa;
3145 	hp->type = TYPE_EXTENDED_LS;
3146 	hp->reserved1 = 0;
3147 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3148 	hp->seq_id = 0;
3149 	hp->df_ctl  = 0;
3150 	hp->seq_cnt = 0;
3151 	hp->ox_id = 0xffff;
3152 	hp->rx_id = 0xffff;
3153 	hp->ro = 0;
3154 
3155 	*rprivp = (caddr_t)privp;
3156 	*cmd_buf = cmd;
3157 	return (fpkt);
3158 
3159 fail:
3160 	if (cmd_dma_handle != NULL) {
3161 		if (cmd_bound) {
3162 			(void) ddi_dma_unbind_handle(cmd_dma_handle);
3163 		}
3164 		ddi_dma_free_handle(&cmd_dma_handle);
3165 		privp->cmd_dma_handle = NULL;
3166 	}
3167 	if (rsp_dma_handle != NULL) {
3168 		if (rsp_bound) {
3169 			(void) ddi_dma_unbind_handle(rsp_dma_handle);
3170 		}
3171 		ddi_dma_free_handle(&rsp_dma_handle);
3172 		privp->rsp_dma_handle = NULL;
3173 	}
3174 	sf_els_free(fpkt);
3175 	return (NULL);
3176 }
3177 
3178 
3179 static void
sf_els_free(struct fcal_packet * fpkt)3180 sf_els_free(struct fcal_packet *fpkt)
3181 {
3182 	struct	sf_els_hdr	*privp = fpkt->fcal_pkt_private;
3183 
3184 	if (privp != NULL) {
3185 		if (