1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * NOT a DDI compliant Sun Fibre Channel port driver(fp)
26  *
27  */
28 
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/param.h>
32 #include <sys/errno.h>
33 #include <sys/uio.h>
34 #include <sys/buf.h>
35 #include <sys/modctl.h>
36 #include <sys/open.h>
37 #include <sys/file.h>
38 #include <sys/kmem.h>
39 #include <sys/poll.h>
40 #include <sys/conf.h>
41 #include <sys/thread.h>
42 #include <sys/var.h>
43 #include <sys/cmn_err.h>
44 #include <sys/stat.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/promif.h>
48 #include <sys/nvpair.h>
49 #include <sys/byteorder.h>
50 #include <sys/scsi/scsi.h>
51 #include <sys/fibre-channel/fc.h>
52 #include <sys/fibre-channel/impl/fc_ulpif.h>
53 #include <sys/fibre-channel/impl/fc_fcaif.h>
54 #include <sys/fibre-channel/impl/fctl_private.h>
55 #include <sys/fibre-channel/impl/fc_portif.h>
56 #include <sys/fibre-channel/impl/fp.h>
57 
/*
 * Sizes of the per-port d_id and pwwn remote-port hash tables
 * (allocated in fp_attach_handler()).  These are defined in fctl.c!
 */
extern int did_table_size;
extern int pwwn_table_size;
61 
/*
 * Character device entry points for the fp minor nodes.  fp is a
 * control node, not a data-transfer device, so only open, close and
 * ioctl are implemented; the data-path entry points are all nodev.
 */
static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
82 
/*
 * Driver (bus-independent) entry points.  No quiesce handling is
 * needed for fast reboot (ddi_quiesce_not_needed).
 */
static struct dev_ops fp_ops = {
	DEVO_REV,			/* build revision */
	0,				/* reference count */
	fp_getinfo,			/* getinfo */
	nulldev,			/* identify - Obsoleted */
	nulldev,			/* probe */
	fp_attach,			/* attach */
	fp_detach,			/* detach */
	nodev,				/* reset */
	&fp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	fp_power,			/* power */
	ddi_quiesce_not_needed 		/* quiesce */
};
97 
/* Driver version string reported through modinfo */
#define	FP_VERSION		"1.99"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

/* Global (non-static) so the version is visible to other modules */
char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	/* Rev of the loadable modules system */
	&modldrv,	/* NULL terminated list of */
	NULL		/* Linkage structures */
};
114 
115 
116 
/*
 * CT name server object registration command codes (port name, node
 * name, class of service, FC-4 types, port type and symbolic names).
 * Presumably walked in this order by the name server registration
 * code -- that code is not in this chunk; confirm there.
 */
static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};
126 
/*
 * Translation table from FCA packet completion states (FC_PKT_*)
 * to the FC error codes (FC_*) returned to fp's callers.
 */
struct fp_xlat {
	uchar_t	xlat_state;	/* FCA packet completion state */
	int	xlat_rval;	/* corresponding FC return code */
} fp_xlat [] = {
	{ FC_PKT_SUCCESS,	FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP,	FC_FAILURE },
	{ FC_PKT_LOCAL_RJT,	FC_FAILURE },
	{ FC_PKT_NPORT_RJT,	FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT,	FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY,	FC_PBUSY },
	{ FC_PKT_FABRIC_BSY,	FC_FBUSY },
	{ FC_PKT_LS_RJT,	FC_FAILURE },
	{ FC_PKT_BA_RJT,	FC_FAILURE },
	{ FC_PKT_TIMEOUT,	FC_FAILURE },
	{ FC_PKT_TRAN_ERROR,	FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE,	FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE,	FC_OFFLINE }
};
147 
/*
 * The 126 valid arbitrated-loop physical addresses (AL_PAs) defined
 * by the FC-AL standard, in ascending order.
 */
static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};
164 
/*
 * Open mode required per FCIO ioctl sub-command.  FP_OPEN entries may
 * be issued through any open of the node; FP_EXCL entries presumably
 * require the port to have been opened exclusively (the permission
 * check itself is not in this chunk -- confirm in fp_fciocmd).
 */
static struct fp_perms {
	uint16_t	fp_ioctl_cmd;	/* FCIO sub-command */
	uchar_t		fp_open_flag;	/* required open mode */
} fp_perm_list [] = {
	{ FCIO_GET_NUM_DEVS,		FP_OPEN },
	{ FCIO_GET_DEV_LIST,		FP_OPEN },
	{ FCIO_GET_SYM_PNAME,		FP_OPEN },
	{ FCIO_GET_SYM_NNAME,		FP_OPEN },
	{ FCIO_SET_SYM_PNAME,		FP_EXCL },
	{ FCIO_SET_SYM_NNAME,		FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS,		FP_OPEN },
	{ FCIO_DEV_LOGIN,		FP_EXCL },
	{ FCIO_DEV_LOGOUT,		FP_EXCL },
	{ FCIO_GET_STATE,		FP_OPEN },
	{ FCIO_DEV_REMOVE,		FP_EXCL },
	{ FCIO_GET_FCODE_REV,		FP_OPEN },
	{ FCIO_GET_FW_REV,		FP_OPEN },
	{ FCIO_GET_DUMP_SIZE,		FP_OPEN },
	{ FCIO_FORCE_DUMP,		FP_EXCL },
	{ FCIO_GET_DUMP,		FP_OPEN },
	{ FCIO_GET_TOPOLOGY,		FP_OPEN },
	{ FCIO_RESET_LINK,		FP_EXCL },
	{ FCIO_RESET_HARD,		FP_EXCL },
	{ FCIO_RESET_HARD_CORE,		FP_EXCL },
	{ FCIO_DIAG,			FP_OPEN },
	{ FCIO_NS,			FP_EXCL },
	{ FCIO_DOWNLOAD_FW,		FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE,		FP_EXCL },
	{ FCIO_LINK_STATUS,		FP_OPEN },
	{ FCIO_GET_HOST_PARAMS,		FP_OPEN },
	{ FCIO_GET_NODE_ID,		FP_OPEN },
	{ FCIO_SET_NODE_ID,		FP_EXCL },
	{ FCIO_SEND_NODE_ID,		FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT, FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT, FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
};

/*
 * Power management component description strings; component 0 has
 * two levels, Port Down (0) and Port Up (1).
 */
static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};
217 
218 
/*
 * MAKE_BE_32(x): byte-swap every 32-bit word of the object *(x) into
 * big-endian (FC wire) order, in place.  A no-op on big-endian hosts.
 * The object's size should be a multiple of sizeof (uint32_t); any
 * trailing remainder bytes are left untouched.
 */
#ifdef	_LITTLE_ENDIAN
#define	MAKE_BE_32(x)	{						\
		uint32_t	*ptr1, i;				\
		ptr1 = (uint32_t *)(x);					\
		for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
			*ptr1 = BE_32(*ptr1);				\
			ptr1++;						\
		}							\
	}
#else
#define	MAKE_BE_32(x)
#endif
231 
/* Default message verbosity: warnings and fatal messages only */
static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);

/* Driver-wide option bits, copied to each port's fp_options at attach */
static uint32_t fp_options = 0;

/* Seconds fp_detach() waits (1s per iteration) for the job queue to drain */
static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;

static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances.  Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

/* Tick counts derived (in fp_attach) from the second values above */
static clock_t	fp_retry_ticks;
static clock_t	fp_offline_ticks;

/* Retry timer granularity in seconds; set to 1 in fp_attach() */
static int fp_retry_ticker;

/* Unsolicited buffer pool sizing: token count and per-buffer size */
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

/* Trace log queue configuration (queue allocated in _init) */
static int		fp_log_size = FP_LOG_SIZE;
static int		fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t	*fp_logq = NULL;

/* Forward declarations for functions defined later in this file */
int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);
263 
264 
265 /*
266  * Perform global initialization
267  */
268 int
269 _init(void)
270 {
271 	int ret;
272 
273 	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
274 	    sizeof (struct fc_local_port), 8)) != 0) {
275 		return (ret);
276 	}
277 
278 	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
279 		ddi_soft_state_fini(&fp_driver_softstate);
280 		return (ret);
281 	}
282 
283 	fp_logq = fc_trace_alloc_logq(fp_log_size);
284 
285 	if ((ret = mod_install(&modlinkage)) != 0) {
286 		fc_trace_free_logq(fp_logq);
287 		ddi_soft_state_fini(&fp_driver_softstate);
288 		scsi_hba_fini(&modlinkage);
289 	}
290 
291 	return (ret);
292 }
293 
294 
295 /*
296  * Prepare for driver unload
297  */
298 int
299 _fini(void)
300 {
301 	int ret;
302 
303 	if ((ret = mod_remove(&modlinkage)) == 0) {
304 		fc_trace_free_logq(fp_logq);
305 		ddi_soft_state_fini(&fp_driver_softstate);
306 		scsi_hba_fini(&modlinkage);
307 	}
308 
309 	return (ret);
310 }
311 
312 
313 /*
314  * Request mod_info() to handle all cases
315  */
316 int
317 _info(struct modinfo *modinfo)
318 {
319 	return (mod_info(&modlinkage, modinfo));
320 }
321 
322 
323 /*
324  * fp_attach:
325  *
326  * The respective cmd handlers take care of performing
327  * ULP related invocations
328  */
329 static int
330 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
331 {
332 	int rval;
333 
334 	/*
335 	 * We check the value of fp_offline_ticker at this
336 	 * point. The variable is global for the driver and
337 	 * not specific to an instance.
338 	 *
339 	 * If there is no user-defined value found in /etc/system
340 	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
341 	 * The minimum setting for this offline timeout according
342 	 * to the FC-FS2 standard (Fibre Channel Framing and
343 	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
344 	 *
345 	 * We do not recommend setting the value to less than 10
346 	 * seconds (RA_TOV) or more than 90 seconds. If this
347 	 * variable is greater than 90 seconds then drivers above
348 	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
349 	 */
350 
351 	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
352 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
353 	    FP_OFFLINE_TICKER);
354 
355 	if ((fp_offline_ticker < 10) ||
356 	    (fp_offline_ticker > 90)) {
357 		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
358 		    "%d second(s). This is outside the "
359 		    "recommended range of 10..90 seconds",
360 		    fp_offline_ticker);
361 	}
362 
363 	/*
364 	 * Tick every second when there are commands to retry.
365 	 * It should tick at the least granular value of pkt_timeout
366 	 * (which is one second)
367 	 */
368 	fp_retry_ticker = 1;
369 
370 	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
371 	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
372 
373 	switch (cmd) {
374 	case DDI_ATTACH:
375 		rval = fp_attach_handler(dip);
376 		break;
377 
378 	case DDI_RESUME:
379 		rval = fp_resume_handler(dip);
380 		break;
381 
382 	default:
383 		rval = DDI_FAILURE;
384 		break;
385 	}
386 	return (rval);
387 }
388 
389 
390 /*
391  * fp_detach:
392  *
393  * If a ULP fails to handle cmd request converse of
394  * cmd is invoked for ULPs that previously succeeded
395  * cmd request.
396  */
397 static int
398 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
399 {
400 	int			rval = DDI_FAILURE;
401 	fc_local_port_t		*port;
402 	fc_attach_cmd_t		converse;
403 	uint8_t			cnt;
404 
405 	if ((port = ddi_get_soft_state(fp_driver_softstate,
406 	    ddi_get_instance(dip))) == NULL) {
407 		return (DDI_FAILURE);
408 	}
409 
410 	mutex_enter(&port->fp_mutex);
411 
412 	if (port->fp_ulp_attach) {
413 		mutex_exit(&port->fp_mutex);
414 		return (DDI_FAILURE);
415 	}
416 
417 	switch (cmd) {
418 	case DDI_DETACH:
419 		if (port->fp_task != FP_TASK_IDLE) {
420 			mutex_exit(&port->fp_mutex);
421 			return (DDI_FAILURE);
422 		}
423 
424 		/* Let's attempt to quit the job handler gracefully */
425 		port->fp_soft_state |= FP_DETACH_INPROGRESS;
426 
427 		mutex_exit(&port->fp_mutex);
428 		converse = FC_CMD_ATTACH;
429 		if (fctl_detach_ulps(port, FC_CMD_DETACH,
430 		    &modlinkage) != FC_SUCCESS) {
431 			mutex_enter(&port->fp_mutex);
432 			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
433 			mutex_exit(&port->fp_mutex);
434 			rval = DDI_FAILURE;
435 			break;
436 		}
437 
438 		mutex_enter(&port->fp_mutex);
439 		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
440 		    cnt++) {
441 			mutex_exit(&port->fp_mutex);
442 			delay(drv_usectohz(1000000));
443 			mutex_enter(&port->fp_mutex);
444 		}
445 
446 		if (port->fp_job_head) {
447 			mutex_exit(&port->fp_mutex);
448 			rval = DDI_FAILURE;
449 			break;
450 		}
451 		mutex_exit(&port->fp_mutex);
452 
453 		rval = fp_detach_handler(port);
454 		break;
455 
456 	case DDI_SUSPEND:
457 		mutex_exit(&port->fp_mutex);
458 		converse = FC_CMD_RESUME;
459 		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
460 		    &modlinkage) != FC_SUCCESS) {
461 			rval = DDI_FAILURE;
462 			break;
463 		}
464 		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
465 			(void) callb_generic_cpr(&port->fp_cpr_info,
466 			    CB_CODE_CPR_RESUME);
467 		}
468 		break;
469 
470 	default:
471 		mutex_exit(&port->fp_mutex);
472 		break;
473 	}
474 
475 	/*
476 	 * Use softint to perform reattach.  Mark fp_ulp_attach so we
477 	 * don't attempt to do this repeatedly on behalf of some persistent
478 	 * caller.
479 	 */
480 	if (rval != DDI_SUCCESS) {
481 		mutex_enter(&port->fp_mutex);
482 		port->fp_ulp_attach = 1;
483 
484 		/*
485 		 * If the port is in the low power mode then there is
486 		 * possibility that fca too could be in low power mode.
487 		 * Try to raise the power before calling attach ulps.
488 		 */
489 
490 		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
491 		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
492 			mutex_exit(&port->fp_mutex);
493 			(void) pm_raise_power(port->fp_port_dip,
494 			    FP_PM_COMPONENT, FP_PM_PORT_UP);
495 		} else {
496 			mutex_exit(&port->fp_mutex);
497 		}
498 
499 
500 		fp_attach_ulps(port, converse);
501 
502 		mutex_enter(&port->fp_mutex);
503 		while (port->fp_ulp_attach) {
504 			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
505 		}
506 
507 		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
508 
509 		/*
510 		 * Mark state as detach failed so asynchronous ULP attach
511 		 * events (downstream, not the ones we're initiating with
512 		 * the call to fp_attach_ulps) are not honored.	 We're
513 		 * really still in pending detach.
514 		 */
515 		port->fp_soft_state |= FP_DETACH_FAILED;
516 
517 		mutex_exit(&port->fp_mutex);
518 	}
519 
520 	return (rval);
521 }
522 
523 
524 /*
525  * fp_getinfo:
526  *   Given the device number, return either the
527  *   dev_info_t pointer or the instance number.
528  */
529 
530 /* ARGSUSED */
531 static int
532 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
533 {
534 	int		rval;
535 	minor_t		instance;
536 	fc_local_port_t *port;
537 
538 	rval = DDI_SUCCESS;
539 	instance = getminor((dev_t)arg);
540 
541 	switch (cmd) {
542 	case DDI_INFO_DEVT2DEVINFO:
543 		if ((port = ddi_get_soft_state(fp_driver_softstate,
544 		    instance)) == NULL) {
545 			rval = DDI_FAILURE;
546 			break;
547 		}
548 		*result = (void *)port->fp_port_dip;
549 		break;
550 
551 	case DDI_INFO_DEVT2INSTANCE:
552 		*result = (void *)(uintptr_t)instance;
553 		break;
554 
555 	default:
556 		rval = DDI_FAILURE;
557 		break;
558 	}
559 
560 	return (rval);
561 }
562 
563 
564 /*
565  * Entry point for power up and power down request from kernel
566  */
567 static int
568 fp_power(dev_info_t *dip, int comp, int level)
569 {
570 	int		rval = DDI_FAILURE;
571 	fc_local_port_t	*port;
572 
573 	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
574 	if (port == NULL || comp != FP_PM_COMPONENT) {
575 		return (rval);
576 	}
577 
578 	switch (level) {
579 	case FP_PM_PORT_UP:
580 		rval = DDI_SUCCESS;
581 
582 		/*
583 		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
584 		 * code complete the rediscovery.
585 		 */
586 		mutex_enter(&port->fp_mutex);
587 		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
588 			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
589 			port->fp_pm_level = FP_PM_PORT_UP;
590 			mutex_exit(&port->fp_mutex);
591 			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
592 			break;
593 		}
594 
595 		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
596 			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
597 
598 			port->fp_pm_level = FP_PM_PORT_UP;
599 			rval = fp_power_up(port);
600 			if (rval != DDI_SUCCESS) {
601 				port->fp_pm_level = FP_PM_PORT_DOWN;
602 			}
603 		} else {
604 			port->fp_pm_level = FP_PM_PORT_UP;
605 		}
606 		mutex_exit(&port->fp_mutex);
607 		break;
608 
609 	case FP_PM_PORT_DOWN:
610 		mutex_enter(&port->fp_mutex);
611 
612 		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
613 		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
614 			/*
615 			 * PM framework goofed up. We have don't
616 			 * have any PM components. Let's never go down.
617 			 */
618 			mutex_exit(&port->fp_mutex);
619 			break;
620 
621 		}
622 
623 		if (port->fp_ulp_attach) {
624 			/* We shouldn't let the power go down */
625 			mutex_exit(&port->fp_mutex);
626 			break;
627 		}
628 
629 		/*
630 		 * Not a whole lot to do if we are detaching
631 		 */
632 		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
633 			port->fp_pm_level = FP_PM_PORT_DOWN;
634 			mutex_exit(&port->fp_mutex);
635 			rval = DDI_SUCCESS;
636 			break;
637 		}
638 
639 		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
640 			port->fp_pm_level = FP_PM_PORT_DOWN;
641 
642 			rval = fp_power_down(port);
643 			if (rval != DDI_SUCCESS) {
644 				port->fp_pm_level = FP_PM_PORT_UP;
645 				ASSERT(!(port->fp_soft_state &
646 				    FP_SOFT_POWER_DOWN));
647 			} else {
648 				ASSERT(port->fp_soft_state &
649 				    FP_SOFT_POWER_DOWN);
650 			}
651 		}
652 		mutex_exit(&port->fp_mutex);
653 		break;
654 
655 	default:
656 		break;
657 	}
658 
659 	return (rval);
660 }
661 
662 
663 /*
664  * Open FC port devctl node
665  */
666 static int
667 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
668 {
669 	int		instance;
670 	fc_local_port_t *port;
671 
672 	if (otype != OTYP_CHR) {
673 		return (EINVAL);
674 	}
675 
676 	/*
677 	 * This is not a toy to play with. Allow only powerful
678 	 * users (hopefully knowledgeable) to access the port
679 	 * (A hacker potentially could download a sick binary
680 	 * file into FCA)
681 	 */
682 	if (drv_priv(credp)) {
683 		return (EPERM);
684 	}
685 
686 	instance = (int)getminor(*devp);
687 
688 	port = ddi_get_soft_state(fp_driver_softstate, instance);
689 	if (port == NULL) {
690 		return (ENXIO);
691 	}
692 
693 	mutex_enter(&port->fp_mutex);
694 	if (port->fp_flag & FP_EXCL) {
695 		/*
696 		 * It is already open for exclusive access.
697 		 * So shut the door on this caller.
698 		 */
699 		mutex_exit(&port->fp_mutex);
700 		return (EBUSY);
701 	}
702 
703 	if (flag & FEXCL) {
704 		if (port->fp_flag & FP_OPEN) {
705 			/*
706 			 * Exclusive operation not possible
707 			 * as it is already opened
708 			 */
709 			mutex_exit(&port->fp_mutex);
710 			return (EBUSY);
711 		}
712 		port->fp_flag |= FP_EXCL;
713 	}
714 	port->fp_flag |= FP_OPEN;
715 	mutex_exit(&port->fp_mutex);
716 
717 	return (0);
718 }
719 
720 
721 /*
722  * The driver close entry point is called on the last close()
723  * of a device. So it is perfectly alright to just clobber the
724  * open flag and reset it to idle (instead of having to reset
725  * each flag bits). For any confusion, check out close(9E).
726  */
727 
728 /* ARGSUSED */
729 static int
730 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
731 {
732 	int		instance;
733 	fc_local_port_t *port;
734 
735 	if (otype != OTYP_CHR) {
736 		return (EINVAL);
737 	}
738 
739 	instance = (int)getminor(dev);
740 
741 	port = ddi_get_soft_state(fp_driver_softstate, instance);
742 	if (port == NULL) {
743 		return (ENXIO);
744 	}
745 
746 	mutex_enter(&port->fp_mutex);
747 	if ((port->fp_flag & FP_OPEN) == 0) {
748 		mutex_exit(&port->fp_mutex);
749 		return (ENODEV);
750 	}
751 	port->fp_flag = FP_IDLE;
752 	mutex_exit(&port->fp_mutex);
753 
754 	return (0);
755 }
756 
757 /*
758  * Handle IOCTL requests
759  */
760 
761 /* ARGSUSED */
762 static int
763 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
764 {
765 	int		instance;
766 	int		ret = 0;
767 	fcio_t		fcio;
768 	fc_local_port_t *port;
769 
770 	instance = (int)getminor(dev);
771 
772 	port = ddi_get_soft_state(fp_driver_softstate, instance);
773 	if (port == NULL) {
774 		return (ENXIO);
775 	}
776 
777 	mutex_enter(&port->fp_mutex);
778 	if ((port->fp_flag & FP_OPEN) == 0) {
779 		mutex_exit(&port->fp_mutex);
780 		return (ENXIO);
781 	}
782 
783 	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
784 		mutex_exit(&port->fp_mutex);
785 		return (ENXIO);
786 	}
787 
788 	mutex_exit(&port->fp_mutex);
789 
790 	/* this will raise power if necessary */
791 	ret = fctl_busy_port(port);
792 	if (ret != 0) {
793 		return (ret);
794 	}
795 
796 	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);
797 
798 
799 	switch (cmd) {
800 	case FCIO_CMD: {
801 #ifdef	_MULTI_DATAMODEL
802 		switch (ddi_model_convert_from(mode & FMODELS)) {
803 		case DDI_MODEL_ILP32: {
804 			struct fcio32 fcio32;
805 
806 			if (ddi_copyin((void *)data, (void *)&fcio32,
807 			    sizeof (struct fcio32), mode)) {
808 				ret = EFAULT;
809 				break;
810 			}
811 			fcio.fcio_xfer = fcio32.fcio_xfer;
812 			fcio.fcio_cmd = fcio32.fcio_cmd;
813 			fcio.fcio_flags = fcio32.fcio_flags;
814 			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
815 			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
816 			fcio.fcio_ibuf =
817 			    (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
818 			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
819 			fcio.fcio_obuf =
820 			    (caddr_t)(uintptr_t)fcio32.fcio_obuf;
821 			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
822 			fcio.fcio_abuf =
823 			    (caddr_t)(uintptr_t)fcio32.fcio_abuf;
824 			fcio.fcio_errno = fcio32.fcio_errno;
825 			break;
826 		}
827 
828 		case DDI_MODEL_NONE:
829 			if (ddi_copyin((void *)data, (void *)&fcio,
830 			    sizeof (fcio_t), mode)) {
831 				ret = EFAULT;
832 			}
833 			break;
834 		}
835 #else	/* _MULTI_DATAMODEL */
836 		if (ddi_copyin((void *)data, (void *)&fcio,
837 		    sizeof (fcio_t), mode)) {
838 			ret = EFAULT;
839 			break;
840 		}
841 #endif	/* _MULTI_DATAMODEL */
842 		if (!ret) {
843 			ret = fp_fciocmd(port, data, mode, &fcio);
844 		}
845 		break;
846 	}
847 
848 	default:
849 		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
850 		    mode, credp, rval);
851 	}
852 
853 	fctl_idle_port(port);
854 
855 	return (ret);
856 }
857 
858 
859 /*
860  * Init Symbolic Port Name and Node Name
861  * LV will try to get symbolic names from FCA driver
862  * and register these to name server,
863  * if LV fails to get these,
864  * LV will register its default symbolic names to name server.
865  * The Default symbolic node name format is :
866  *	<hostname>:<hba driver name>(instance)
867  * The Default symbolic port name format is :
868  *	<fp path name>
869  */
870 static void
871 fp_init_symbolic_names(fc_local_port_t *port)
872 {
873 	const char *vendorname = ddi_driver_name(port->fp_fca_dip);
874 	char *sym_name;
875 	char fcaname[50] = {0};
876 	int hostnlen, fcanlen;
877 
878 	if (port->fp_sym_node_namelen == 0) {
879 		hostnlen = strlen(utsname.nodename);
880 		(void) snprintf(fcaname, sizeof (fcaname),
881 		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
882 		fcanlen = strlen(fcaname);
883 
884 		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
885 		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
886 		port->fp_sym_node_namelen = strlen(sym_name);
887 		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
888 			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
889 		}
890 		(void) strncpy(port->fp_sym_node_name, sym_name,
891 		    port->fp_sym_node_namelen);
892 		kmem_free(sym_name, hostnlen + fcanlen + 2);
893 	}
894 
895 	if (port->fp_sym_port_namelen == 0) {
896 		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
897 
898 		(void) ddi_pathname(port->fp_port_dip, pathname);
899 		port->fp_sym_port_namelen = strlen(pathname);
900 		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
901 			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
902 		}
903 		(void) strncpy(port->fp_sym_port_name, pathname,
904 		    port->fp_sym_port_namelen);
905 		kmem_free(pathname, MAXPATHLEN);
906 	}
907 }
908 
909 
910 /*
911  * Perform port attach
912  */
913 static int
914 fp_attach_handler(dev_info_t *dip)
915 {
916 	int			rval;
917 	int			instance;
918 	int			port_num;
919 	int			port_len;
920 	char			name[30];
921 	char			i_pwwn[17];
922 	fp_cmd_t		*pkt;
923 	uint32_t		ub_count;
924 	fc_local_port_t		*port;
925 	job_request_t		*job;
926 	fc_local_port_t *phyport = NULL;
927 	int portpro1;
928 	char pwwn[17], nwwn[17];
929 
930 	instance = ddi_get_instance(dip);
931 
932 	port_len = sizeof (port_num);
933 
934 	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
935 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
936 	    (caddr_t)&port_num, &port_len);
937 
938 	if (rval != DDI_SUCCESS) {
939 		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
940 		    instance);
941 		return (DDI_FAILURE);
942 	}
943 
944 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
945 	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
946 		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
947 		    instance);
948 		return (DDI_FAILURE);
949 	}
950 
951 	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
952 	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
953 		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
954 		    " point minor node", instance);
955 		ddi_remove_minor_node(dip, NULL);
956 		return (DDI_FAILURE);
957 	}
958 
959 	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
960 	    != DDI_SUCCESS) {
961 		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
962 		    instance);
963 		ddi_remove_minor_node(dip, NULL);
964 		return (DDI_FAILURE);
965 	}
966 	port = ddi_get_soft_state(fp_driver_softstate, instance);
967 
968 	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);
969 
970 	port->fp_instance = instance;
971 	port->fp_ulp_attach = 1;
972 	port->fp_port_num = port_num;
973 	port->fp_verbose = fp_verbosity;
974 	port->fp_options = fp_options;
975 
976 	port->fp_fca_dip = ddi_get_parent(dip);
977 	port->fp_port_dip = dip;
978 	port->fp_fca_tran = (fc_fca_tran_t *)
979 	    ddi_get_driver_private(port->fp_fca_dip);
980 
981 	port->fp_task = port->fp_last_task = FP_TASK_IDLE;
982 
983 	/*
984 	 * Init the starting value of fp_rscn_count. Note that if
985 	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
986 	 * actual # of RSCNs will be (fp_rscn_count - 1)
987 	 */
988 	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
989 
990 	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
991 	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
992 	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
993 
994 	(void) sprintf(name, "fp%d_cache", instance);
995 
996 	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
997 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
998 	    "phyport-instance", -1)) != -1) {
999 		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
1000 		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
1001 		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
1002 		port->fp_npiv_type = FC_NPIV_PORT;
1003 	}
1004 
1005 	/*
1006 	 * Allocate the pool of fc_packet_t structs to be used with
1007 	 * this fp instance.
1008 	 */
1009 	port->fp_pkt_cache = kmem_cache_create(name,
1010 	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1011 	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1012 	    NULL, 0);
1013 	port->fp_out_fpcmds = 0;
1014 	if (port->fp_pkt_cache == NULL) {
1015 		goto cache_alloc_failed;
1016 	}
1017 
1018 
1019 	/*
1020 	 * Allocate the d_id and pwwn hash tables for all remote ports
1021 	 * connected to this local port.
1022 	 */
1023 	port->fp_did_table = kmem_zalloc(did_table_size *
1024 	    sizeof (struct d_id_hash), KM_SLEEP);
1025 
1026 	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1027 	    sizeof (struct pwwn_hash), KM_SLEEP);
1028 
1029 	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1030 	    MINCLSYSPRI, 1, 16, 0);
1031 
1032 	/* Indicate that don't have the pm components yet */
1033 	port->fp_soft_state |=	FP_SOFT_NO_PMCOMP;
1034 
1035 	/*
1036 	 * Bind the callbacks with the FCA driver. This will open the gate
1037 	 * for asynchronous callbacks, so after this call the fp_mutex
1038 	 * must be held when updating the fc_local_port_t struct.
1039 	 *
1040 	 * This is done _before_ setting up the job thread so we can avoid
1041 	 * cleaning up after the thread_create() in the error path. This
1042 	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1043 	 */
1044 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1045 		goto bind_callbacks_failed;
1046 	}
1047 
1048 	if (phyport) {
1049 		mutex_enter(&phyport->fp_mutex);
1050 		if (phyport->fp_port_next) {
1051 			phyport->fp_port_next->fp_port_prev = port;
1052 			port->fp_port_next =  phyport->fp_port_next;
1053 			phyport->fp_port_next = port;
1054 			port->fp_port_prev = phyport;
1055 		} else {
1056 			phyport->fp_port_next = port;
1057 			phyport->fp_port_prev = port;
1058 			port->fp_port_next =  phyport;
1059 			port->fp_port_prev = phyport;
1060 		}
1061 		mutex_exit(&phyport->fp_mutex);
1062 	}
1063 
1064 	/*
1065 	 * Init Symbolic Names
1066 	 */
1067 	fp_init_symbolic_names(port);
1068 
1069 	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1070 	    KM_SLEEP, NULL);
1071 
1072 	if (pkt == NULL) {
1073 		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1074 		    instance);
1075 		goto alloc_els_packet_failed;
1076 	}
1077 
1078 	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1079 	    v.v_maxsyspri - 2);
1080 
1081 	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1082 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1083 	    i_pwwn) != DDI_PROP_SUCCESS) {
1084 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1085 		    "fp(%d): Updating 'initiator-port' property"
1086 		    " on fp dev_info node failed", instance);
1087 	}
1088 
1089 	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1090 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1091 	    i_pwwn) != DDI_PROP_SUCCESS) {
1092 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1093 		    "fp(%d): Updating 'initiator-node' property"
1094 		    " on fp dev_info node failed", instance);
1095 	}
1096 
1097 	mutex_enter(&port->fp_mutex);
1098 	port->fp_els_resp_pkt = pkt;
1099 	mutex_exit(&port->fp_mutex);
1100 
1101 	/*
1102 	 * Determine the count of unsolicited buffers this FCA can support
1103 	 */
1104 	fp_retrieve_caps(port);
1105 
1106 	/*
1107 	 * Allocate unsolicited buffer tokens
1108 	 */
1109 	if (port->fp_ub_count) {
1110 		ub_count = port->fp_ub_count;
1111 		port->fp_ub_tokens = kmem_zalloc(ub_count *
1112 		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
1113 		/*
1114 		 * Do not fail the attach if unsolicited buffer allocation
1115 		 * fails; Just try to get along with whatever the FCA can do.
1116 		 */
1117 		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1118 		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1119 		    FC_SUCCESS || ub_count != port->fp_ub_count) {
1120 			cmn_err(CE_WARN, "fp(%d): failed to allocate "
1121 			    " Unsolicited buffers. proceeding with attach...",
1122 			    instance);
1123 			kmem_free(port->fp_ub_tokens,
1124 			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1125 			port->fp_ub_tokens = NULL;
1126 		}
1127 	}
1128 
1129 	fp_load_ulp_modules(dip, port);
1130 
1131 	/*
1132 	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1133 	 */
1134 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1135 	    "pm-hardware-state", "needs-suspend-resume",
1136 	    strlen("needs-suspend-resume") + 1);
1137 
1138 	/*
1139 	 * fctl maintains a list of all port handles, so
1140 	 * help fctl add this one to its list now.
1141 	 */
1142 	mutex_enter(&port->fp_mutex);
1143 	fctl_add_port(port);
1144 
1145 	/*
	 * If a state change is already in progress, set the bind state to
1147 	 * OFFLINE as well, so further state change callbacks into ULPs
1148 	 * will pass the appropriate states
1149 	 */
1150 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1151 	    port->fp_statec_busy) {
1152 		port->fp_bind_state = FC_STATE_OFFLINE;
1153 		mutex_exit(&port->fp_mutex);
1154 
1155 		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1156 	} else {
1157 		/*
1158 		 * Without dropping the mutex, ensure that the port
1159 		 * startup happens ahead of state change callback
1160 		 * processing
1161 		 */
1162 		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1163 
1164 		port->fp_last_task = port->fp_task;
1165 		port->fp_task = FP_TASK_PORT_STARTUP;
1166 
1167 		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1168 		    fp_startup_done, (opaque_t)port, KM_SLEEP);
1169 
1170 		port->fp_job_head = port->fp_job_tail = job;
1171 
1172 		cv_signal(&port->fp_cv);
1173 
1174 		mutex_exit(&port->fp_mutex);
1175 	}
1176 
1177 	mutex_enter(&port->fp_mutex);
1178 	while (port->fp_ulp_attach) {
1179 		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1180 	}
1181 	mutex_exit(&port->fp_mutex);
1182 
1183 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1184 	    "pm-components", fp_pm_comps,
1185 	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1186 	    DDI_PROP_SUCCESS) {
1187 		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1188 		    " components property, PM disabled on this port.");
1189 		mutex_enter(&port->fp_mutex);
1190 		port->fp_pm_level = FP_PM_PORT_UP;
1191 		mutex_exit(&port->fp_mutex);
1192 	} else {
1193 		if (pm_raise_power(dip, FP_PM_COMPONENT,
1194 		    FP_PM_PORT_UP) != DDI_SUCCESS) {
1195 			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1196 			    " power level");
1197 			mutex_enter(&port->fp_mutex);
1198 			port->fp_pm_level = FP_PM_PORT_UP;
1199 			mutex_exit(&port->fp_mutex);
1200 		}
1201 
1202 		/*
1203 		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1204 		 * the call to pm_raise_power.	The PM framework can't
1205 		 * handle multiple threads calling into it during attach.
1206 		 */
1207 
1208 		mutex_enter(&port->fp_mutex);
1209 		port->fp_soft_state &=	~FP_SOFT_NO_PMCOMP;
1210 		mutex_exit(&port->fp_mutex);
1211 	}
1212 
1213 	ddi_report_dev(dip);
1214 
1215 	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1216 
1217 	return (DDI_SUCCESS);
1218 
1219 	/*
	 * Unwind any/all preceding allocations in the event of an error.
1221 	 */
1222 
1223 alloc_els_packet_failed:
1224 
1225 	if (port->fp_fca_handle != NULL) {
1226 		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1227 		port->fp_fca_handle = NULL;
1228 	}
1229 
1230 	if (port->fp_ub_tokens != NULL) {
1231 		(void) fc_ulp_ubfree(port, port->fp_ub_count,
1232 		    port->fp_ub_tokens);
1233 		kmem_free(port->fp_ub_tokens,
1234 		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1235 		port->fp_ub_tokens = NULL;
1236 	}
1237 
1238 	if (port->fp_els_resp_pkt != NULL) {
1239 		fp_free_pkt(port->fp_els_resp_pkt);
1240 		port->fp_els_resp_pkt = NULL;
1241 	}
1242 
1243 bind_callbacks_failed:
1244 
1245 	if (port->fp_taskq != NULL) {
1246 		taskq_destroy(port->fp_taskq);
1247 	}
1248 
1249 	if (port->fp_pwwn_table != NULL) {
1250 		kmem_free(port->fp_pwwn_table,
1251 		    pwwn_table_size * sizeof (struct pwwn_hash));
1252 		port->fp_pwwn_table = NULL;
1253 	}
1254 
1255 	if (port->fp_did_table != NULL) {
1256 		kmem_free(port->fp_did_table,
1257 		    did_table_size * sizeof (struct d_id_hash));
1258 		port->fp_did_table = NULL;
1259 	}
1260 
1261 	if (port->fp_pkt_cache != NULL) {
1262 		kmem_cache_destroy(port->fp_pkt_cache);
1263 		port->fp_pkt_cache = NULL;
1264 	}
1265 
1266 cache_alloc_failed:
1267 
1268 	cv_destroy(&port->fp_attach_cv);
1269 	cv_destroy(&port->fp_cv);
1270 	mutex_destroy(&port->fp_mutex);
1271 	ddi_remove_minor_node(port->fp_port_dip, NULL);
1272 	ddi_soft_state_free(fp_driver_softstate, instance);
1273 	ddi_prop_remove_all(dip);
1274 
1275 	return (DDI_FAILURE);
1276 }
1277 
1278 
1279 /*
1280  * Handle DDI_RESUME request
1281  */
static int
fp_resume_handler(dev_info_t *dip)
{
	int		rval;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		/* pm_raise_power(9F) can block, so drop fp_mutex across it */
		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port thread.
	 * Further, all the discovery is done in callback mode
	 * (not polled mode); In a specific case such as this, the discovery
	 * is required to happen in polled mode. The easiest way out is
	 * to bail out port thread and get started. Come back and fix this
	 * to do on demand discovery initiated by ULPs. ULPs such as FCP
	 * will do on-demand discovery during pre-power-up busctl handling
	 * which will only be possible when SCSA provides a new HBA vector
	 * for sending down the PM busctl requests.
	 */
	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

	rval = fp_resume_all(port, FC_CMD_RESUME);
	if (rval != DDI_SUCCESS) {
		/*
		 * Resume failed: restore the SUSPEND flag and re-checkpoint
		 * the CPR callback state so a later resume can retry.
		 */
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		(void) callb_generic_cpr(&port->fp_cpr_info,
		    CB_CODE_CPR_CHKPT);
	}

	return (rval);
}
1342 
1343 /*
1344  * Perform FC Port power on initialization
1345  */
1346 static int
1347 fp_power_up(fc_local_port_t *port)
1348 {
1349 	int	rval;
1350 
1351 	ASSERT(MUTEX_HELD(&port->fp_mutex));
1352 
1353 	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1354 	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1355 
1356 	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1357 
1358 	mutex_exit(&port->fp_mutex);
1359 
1360 	rval = fp_resume_all(port, FC_CMD_POWER_UP);
1361 	if (rval != DDI_SUCCESS) {
1362 		mutex_enter(&port->fp_mutex);
1363 		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1364 	} else {
1365 		mutex_enter(&port->fp_mutex);
1366 	}
1367 
1368 	return (rval);
1369 }
1370 
1371 
1372 /*
1373  * It is important to note that the power may possibly be removed between
1374  * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1375  * FC port hardware would have gone through an OFFLINE to ONLINE transition
1376  * (hardware state). In this case, the port driver may need to rediscover the
1377  * topology, perform LOGINs, register with the name server again and perform
1378  * any such port initialization procedures. To perform LOGINs, the driver could
1379  * use the port device handle to see if a LOGIN needs to be performed and use
1380  * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1381  * or removed) which will be reflected in the map the ULPs will see.
1382  */
static int
fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
{

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	/* Re-register the state change/unsolicited callbacks with the FCA */
	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	/*
	 * If there are commands queued for delayed retry, instead of
	 * working the hard way to figure out which ones are good for
	 * restart and which ones not (ELSs are definitely not good
	 * as the port will have to go through a new spin of rediscovery
	 * now), just flush them out.
	 */
	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
		fp_cmd_t	*cmd;

		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;

		mutex_exit(&port->fp_mutex);
		/* Fail each queued command back to its originator */
		while ((cmd = fp_deque_cmd(port)) != NULL) {
			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
			fp_iodone(cmd);
		}
		mutex_enter(&port->fp_mutex);
	}

	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
		/*
		 * Port is still OFFLINE: restart the offline timer if it
		 * was running at suspend time, or if devices are present.
		 */
		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
		    port->fp_dev_count) {
			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
			port->fp_offline_tid = timeout(fp_offline_timeout,
			    (caddr_t)port, fp_offline_ticks);
		}
		/* Kick the job thread if any work was queued meanwhile */
		if (port->fp_job_head) {
			cv_signal(&port->fp_cv);
		}
		mutex_exit(&port->fp_mutex);
		fctl_attach_ulps(port, cmd, &modlinkage);
	} else {
		struct job_request *job;

		/*
		 * If an OFFLINE timer was running at the time of
		 * suspending, there is no need to restart it as
		 * the port is ONLINE now.
		 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		/* Rediscover the topology synchronously before ULP attach */
		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}
1455 
1456 
1457 /*
1458  * At this time, there shouldn't be any I/O requests on this port.
1459  * But the unsolicited callbacks from the underlying FCA port need
1460  * to be handled very carefully. The steps followed to handle the
1461  * DDI_DETACH are:
1462  *	+	Grab the port driver mutex, check if the unsolicited
1463  *		callback is currently under processing. If true, fail
1464  *		the DDI_DETACH request by printing a message; If false
1465  *		mark the DDI_DETACH as under progress, so that any
1466  *		further unsolicited callbacks get bounced.
1467  *	+	Perform PRLO/LOGO if necessary, cleanup all the data
1468  *		structures.
1469  *	+	Get the job_handler thread to gracefully exit.
1470  *	+	Unregister callbacks with the FCA port.
1471  *	+	Now that some peace is found, notify all the ULPs of
1472  *		DDI_DETACH request (using ulp_port_detach entry point)
1473  *	+	Free all mutexes, semaphores, conditional variables.
1474  *	+	Free the soft state, return success.
1475  *
1476  * Important considerations:
1477  *		Port driver de-registers state change and unsolicited
1478  *		callbacks before taking up the task of notifying ULPs
1479  *		and performing PRLO and LOGOs.
1480  *
1481  *		A port may go offline at the time PRLO/LOGO is being
1482  *		requested. It is expected of all FCA drivers to fail
1483  *		such requests either immediately with a FC_OFFLINE
1484  *		return code to fc_fca_transport() or return the packet
1485  *		asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1486  */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t	*job;
	uint32_t	delay_count;
	fc_orphan_t	*orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands, because
		 * sd/ssd/scsi_vhci should have finished all their commands,
		 * fcp/fcip/fcsm should have finished all their commands.
		 *
		 * It seems that all fp internal commands are asynchronous now.
		 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	/* Wait (1s per iteration, up to ~30s) for in-flight FCA callbacks */
	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	/* From here on, further unsolicited callbacks get bounced */
	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job.  Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us.  In theory the fp driver could even
	 * be modunloaded while the job thread is not yet done executing.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	/* Take this port off fctl's global list of port handles */
	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		/* Return the unsolicited buffers to the FCA */
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	/* Unregister all callbacks with the underlying FCA port */
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	/* Free the list of orphaned remote port records */
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	/* Now that nothing can reach the port, tear down its sync objects */
	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}
1636 
1637 
1638 /*
1639  * Steps to perform DDI_SUSPEND operation on a FC port
1640  *
1641  *	- If already suspended return DDI_FAILURE
1642  *	- If already power-suspended return DDI_SUCCESS
1643  *	- If an unsolicited callback or state change handling is in
1644  *	    in progress, throw a warning message, return DDI_FAILURE
1645  *	- Cancel timeouts
1646  *	- SUSPEND the job_handler thread (means do nothing as it is
1647  *	    taken care of by the CPR frame work)
1648  */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t	delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console)
	 *
	 * Busy waiting for a short interval (500 millisecond ?) to see
	 * if the callback processing completes may be another idea. Since
	 * most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	/* Wait (1s per iteration, up to ~30s) for callbacks to drain */
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	/* Cancel timeouts and mark remote ports for re-login on resume */
	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}
1723 
1724 
1725 /*
1726  * Prepare for graceful power down of a FC port
1727  */
static int
fp_power_down(fc_local_port_t *port)
{
	/* Called and returns with fp_mutex held */
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Power down request followed by a DDI_SUSPEND should
	 * never happen; If it does return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		/* Re-acquire fp_mutex: the caller expects it held on return */
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	/* Cancel timeouts and mark remote ports for re-login on power up */
	fp_suspend_all(port);

	return (DDI_SUCCESS);
}
1799 
1800 
1801 /*
1802  * Suspend the entire FC port
1803  */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int			index;
	struct pwwn_hash	*head;
	fc_remote_port_t	*pd;

	/* Called and returns with fp_mutex held */
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Cancel the WAIT timer if running; remember that it was
	 * running so fp_resume_all() can act on it later.
	 */
	if (port->fp_wait_tid != 0) {
		timeout_id_t	tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		/* untimeout(9F) may wait for the handler; drop fp_mutex */
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	/* Likewise cancel the OFFLINE timer and remember it was running */
	if (port->fp_offline_tid) {
		timeout_id_t	tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	/* Unregister the driver's callbacks with the underlying FCA port */
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}
1857 
1858 
1859 /*
1860  * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
1861  * Performs intializations for fc_packet_t structs.
1862  * Returns 0 for success or -1 for failure.
1863  *
1864  * This function allocates DMA handles for both command and responses.
1865  * Most of the ELSs used have both command and responses so it is strongly
1866  * desired to move them to cache constructor routine.
1867  *
1868  * Context: Can sleep iff called with KM_SLEEP flag.
1869  */
1870 static int
1871 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1872 {
1873 	int		(*cb) (caddr_t);
1874 	fc_packet_t	*pkt;
1875 	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
1876 	fc_local_port_t *port = (fc_local_port_t *)cdarg;
1877 
1878 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1879 
1880 	cmd->cmd_next = NULL;
1881 	cmd->cmd_flags = 0;
1882 	cmd->cmd_dflags = 0;
1883 	cmd->cmd_job = NULL;
1884 	cmd->cmd_port = port;
1885 	pkt = &cmd->cmd_pkt;
1886 
1887 	if (ddi_dma_alloc_handle(port->fp_fca_dip,
1888 	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1889 	    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1890 		return (-1);
1891 	}
1892 
1893 	if (ddi_dma_alloc_handle(port->fp_fca_dip,
1894 	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1895 	    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1896 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1897 		return (-1);
1898 	}
1899 
1900 	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1901 	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1902 	    pkt->pkt_data_cookie_cnt = 0;
1903 	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1904 	    pkt->pkt_data_cookie = NULL;
1905 	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1906 
1907 	return (0);
1908 }
1909 
1910 
1911 /*
1912  * fp_cache_destructor: Destructor function for kmem_cache_create().
1913  * Performs un-intializations for fc_packet_t structs.
1914  */
1915 /* ARGSUSED */
1916 static void
1917 fp_cache_destructor(void *buf, void *cdarg)
1918 {
1919 	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
1920 	fc_packet_t	*pkt;
1921 
1922 	pkt = &cmd->cmd_pkt;
1923 	if (pkt->pkt_cmd_dma) {
1924 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1925 	}
1926 
1927 	if (pkt->pkt_resp_dma) {
1928 		ddi_dma_free_handle(&pkt->pkt_resp_dma);
1929 	}
1930 }
1931 
1932 
1933 /*
1934  * Packet allocation for ELS and any other port driver commands
1935  *
1936  * Some ELSs like FLOGI and PLOGI are critical for topology and
1937  * device discovery and a system's inability to allocate memory
1938  * or DVMA resources while performing some of these critical ELSs
1939  * cause a lot of problem. While memory allocation failures are
1940  * rare, DVMA resource failures are common as the applications
1941  * are becoming more and more powerful on huge servers.	 So it
1942  * is desirable to have a framework support to reserve a fragment
1943  * of DVMA. So until this is fixed the correct way, the suffering
1944  * is huge whenever a LIP happens at a time DVMA resources are
1945  * drained out completely - So an attempt needs to be made to
1946  * KM_SLEEP while requesting for these resources, hoping that
1947  * the requests won't hang forever.
1948  *
1949  * The fc_remote_port_t argument is stored into the pkt_pd field in the
1950  * fc_packet_t struct prior to the fc_ulp_init_packet() call.  This
1951  * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1952  * If there is no fc_remote_port_t associated with the fc_packet_t, then
1953  * fp_alloc_pkt() must be called with pd set to NULL.
1954  */
1955 
1956 static fp_cmd_t *
1957 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1958     fc_remote_port_t *pd)
1959 {
1960 	int		rval;
1961 	ulong_t		real_len;
1962 	fp_cmd_t	*cmd;
1963 	fc_packet_t	*pkt;
1964 	int		(*cb) (caddr_t);
1965 	ddi_dma_cookie_t	pkt_cookie;
1966 	ddi_dma_cookie_t	*cp;
1967 	uint32_t		cnt;
1968 
1969 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1970 
1971 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1972 
1973 	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1974 	if (cmd == NULL) {
1975 		return (cmd);
1976 	}
1977 
1978 	cmd->cmd_ulp_pkt = NULL;
1979 	cmd->cmd_flags = 0;
1980 	pkt = &cmd->cmd_pkt;
1981 	ASSERT(cmd->cmd_dflags == 0);
1982 
1983 	pkt->pkt_datalen = 0;
1984 	pkt->pkt_data = NULL;
1985 	pkt->pkt_state = 0;
1986 	pkt->pkt_action = 0;
1987 	pkt->pkt_reason = 0;
1988 	pkt->pkt_expln = 0;
1989 
1990 	/*
1991 	 * Init pkt_pd with the given pointer; this must be done _before_
1992 	 * the call to fc_ulp_init_packet().
1993 	 */
1994 	pkt->pkt_pd = pd;
1995 
1996 	/* Now call the FCA driver to init its private, per-packet fields */
1997 	if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
1998 		goto alloc_pkt_failed;
1999 	}
2000 
2001 	if (cmd_len) {
2002 		ASSERT(pkt->pkt_cmd_dma != NULL);
2003 
2004 		rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2005 		    port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2006 		    cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2007 		    &pkt->pkt_cmd_acc);
2008 
2009 		if (rval != DDI_SUCCESS) {
2010 			goto alloc_pkt_failed;
2011 		}
2012 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2013 
2014 		if (real_len < cmd_len) {
2015 			goto alloc_pkt_failed;
2016 		}
2017 
2018 		rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2019 		    pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2020 		    DDI_DMA_CONSISTENT, cb, NULL,
2021 		    &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2022 
2023 		if (rval != DDI_DMA_MAPPED) {
2024 			goto alloc_pkt_failed;
2025 		}
2026 
2027 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2028 
2029 		if (pkt->pkt_cmd_cookie_cnt >
2030 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2031 			goto alloc_pkt_failed;
2032 		}
2033 
2034 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2035 
2036 		cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2037 		    pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2038 		    KM_NOSLEEP);
2039 
2040 		if (cp == NULL) {
2041 			goto alloc_pkt_failed;
2042 		}
2043 
2044 		*cp = pkt_cookie;
2045 		cp++;
2046 		for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2047 			ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2048 			*cp = pkt_cookie;
2049 		}
2050 	}
2051 
2052 	if (resp_len) {
2053 		ASSERT(pkt->pkt_resp_dma != NULL);
2054 
2055 		rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2056 		    port->fp_fca_tran->fca_acc_attr,
2057 		    DDI_DMA_CONSISTENT, cb, NULL,
2058 		    (caddr_t *)&pkt->pkt_resp, &real_len,
2059 		    &pkt->pkt_resp_acc);
2060 
2061 		if (rval != DDI_SUCCESS) {
2062 			goto alloc_pkt_failed;
2063 		}
2064 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2065 
2066 		if (real_len < resp_len) {
2067 			goto alloc_pkt_failed;
2068 		}
2069 
2070 		rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2071 		    pkt->pkt_resp, real_len, DDI_DMA_READ |
2072 		    DDI_DMA_CONSISTENT, cb, NULL,
2073 		    &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2074 
2075 		if (rval != DDI_DMA_MAPPED) {
2076 			goto alloc_pkt_failed;
2077 		}
2078 
2079 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2080 
2081 		if (pkt->pkt_resp_cookie_cnt >
2082 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2083 			goto alloc_pkt_failed;
2084 		}
2085 
2086 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2087 
2088 		cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2089 		    pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2090 		    KM_NOSLEEP);
2091 
2092 		if (cp == NULL) {
2093 			goto alloc_pkt_failed;
2094 		}
2095 
2096 		*cp = pkt_cookie;
2097 		cp++;
2098 		for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2099 			ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2100 			*cp = pkt_cookie;
2101 		}
2102 	}
2103 
2104 	pkt->pkt_cmdlen = cmd_len;
2105 	pkt->pkt_rsplen = resp_len;
2106 	pkt->pkt_ulp_private = cmd;
2107 
2108 	return (cmd);
2109 
2110 alloc_pkt_failed:
2111 
2112 	fp_free_dma(cmd);
2113 
2114 	if (pkt->pkt_cmd_cookie != NULL) {
2115 		kmem_free(pkt->pkt_cmd_cookie,
2116 		    pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2117 		pkt->pkt_cmd_cookie = NULL;
2118 	}
2119 
2120 	if (pkt->pkt_resp_cookie != NULL) {
2121 		kmem_free(pkt->pkt_resp_cookie,
2122 		    pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2123 		pkt->pkt_resp_cookie = NULL;
2124 	}
2125 
2126 	kmem_cache_free(port->fp_pkt_cache, cmd);
2127 
2128 	return (NULL);
2129 }
2130 
2131 
2132 /*
2133  * Free FC packet
2134  */
2135 static void
2136 fp_free_pkt(fp_cmd_t *cmd)
2137 {
2138 	fc_local_port_t *port;
2139 	fc_packet_t	*pkt;
2140 
2141 	ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2142 
2143 	cmd->cmd_next = NULL;
2144 	cmd->cmd_job = NULL;
2145 	pkt = &cmd->cmd_pkt;
2146 	pkt->pkt_ulp_private = 0;
2147 	pkt->pkt_tran_flags = 0;
2148 	pkt->pkt_tran_type = 0;
2149 	port = cmd->cmd_port;
2150 
2151 	if (pkt->pkt_cmd_cookie != NULL) {
2152 		kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2153 		    sizeof (ddi_dma_cookie_t));
2154 		pkt->pkt_cmd_cookie = NULL;
2155 	}
2156 
2157 	if (pkt->pkt_resp_cookie != NULL) {
2158 		kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2159 		    sizeof (ddi_dma_cookie_t));
2160 		pkt->pkt_resp_cookie = NULL;
2161 	}
2162 
2163 	fp_free_dma(cmd);
2164 	(void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2165 	kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2166 }
2167 
2168 
2169 /*
2170  * Release DVMA resources
2171  */
2172 static void
2173 fp_free_dma(fp_cmd_t *cmd)
2174 {
2175 	fc_packet_t *pkt = &cmd->cmd_pkt;
2176 
2177 	pkt->pkt_cmdlen = 0;
2178 	pkt->pkt_rsplen = 0;
2179 	pkt->pkt_tran_type = 0;
2180 	pkt->pkt_tran_flags = 0;
2181 
2182 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2183 		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2184 	}
2185 
2186 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2187 		if (pkt->pkt_cmd_acc) {
2188 			ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2189 		}
2190 	}
2191 
2192 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2193 		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2194 	}
2195 
2196 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2197 		if (pkt->pkt_resp_acc) {
2198 			ddi_dma_mem_free(&pkt->pkt_resp_acc);
2199 		}
2200 	}
2201 	cmd->cmd_dflags = 0;
2202 }
2203 
2204 
2205 /*
2206  * Dedicated thread to perform various activities.  One thread for
2207  * each fc_local_port_t (driver soft state) instance.
2208  * Note, this effectively works out to one thread for each local
2209  * port, but there are also some Solaris taskq threads in use on a per-local
2210  * port basis; these also need to be taken into consideration.
2211  */
2212 static void
2213 fp_job_handler(fc_local_port_t *port)
2214 {
2215 	int			rval;
2216 	uint32_t		*d_id;
2217 	fc_remote_port_t	*pd;
2218 	job_request_t		*job;
2219 
2220 #ifndef	__lock_lint
2221 	/*
2222 	 * Solaris-internal stuff for proper operation of kernel threads
2223 	 * with Solaris CPR.
2224 	 */
2225 	CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2226 	    callb_generic_cpr, "fp_job_handler");
2227 #endif
2228 
2229 
2230 	/* Loop forever waiting for work to do */
2231 	for (;;) {
2232 
2233 		mutex_enter(&port->fp_mutex);
2234 
2235 		/*
2236 		 * Sleep if no work to do right now, or if we want
2237 		 * to suspend or power-down.
2238 		 */
2239 		while (port->fp_job_head == NULL ||
2240 		    (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2241 		    FP_SOFT_SUSPEND))) {
2242 			CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2243 			cv_wait(&port->fp_cv, &port->fp_mutex);
2244 			CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2245 		}
2246 
2247 		/*
2248 		 * OK, we've just been woken up, so retrieve the next entry
2249 		 * from the head of the job queue for this local port.
2250 		 */
2251 		job = fctl_deque_job(port);
2252 
2253 		/*
2254 		 * Handle all the fp driver's supported job codes here
2255 		 * in this big honkin' switch.
2256 		 */
2257 		switch (job->job_code) {
2258 		case JOB_PORT_SHUTDOWN:
2259 			/*
2260 			 * fp_port_shutdown() is only called from here. This
2261 			 * will prepare the local port instance (softstate)
2262 			 * for detaching.  This cancels timeout callbacks,
2263 			 * executes LOGOs with remote ports, cleans up tables,
2264 			 * and deallocates data structs.
2265 			 */
2266 			fp_port_shutdown(port, job);
2267 
2268 			/*
2269 			 * This will exit the job thread.
2270 			 */
2271 #ifndef __lock_lint
2272 			CALLB_CPR_EXIT(&(port->fp_cpr_info));
2273 #else
2274 			mutex_exit(&port->fp_mutex);
2275 #endif
2276 			fctl_jobdone(job);
2277 			thread_exit();
2278 
2279 			/* NOTREACHED */
2280 
2281 		case JOB_ATTACH_ULP: {
2282 			/*
2283 			 * This job is spawned in response to a ULP calling
2284 			 * fc_ulp_add().
2285 			 */
2286 
2287 			boolean_t do_attach_ulps = B_TRUE;
2288 
2289 			/*
2290 			 * If fp is detaching, we don't want to call
2291 			 * fp_startup_done as this asynchronous
2292 			 * notification may interfere with the re-attach.
2293 			 */
2294 
2295 			if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2296 			    FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2297 				do_attach_ulps = B_FALSE;
2298 			} else {
2299 				/*
2300 				 * We are going to force the transport
2301 				 * to attach to the ULPs, so set
2302 				 * fp_ulp_attach.  This will keep any
2303 				 * potential detach from occurring until
2304 				 * we are done.
2305 				 */
2306 				port->fp_ulp_attach = 1;
2307 			}
2308 
2309 			mutex_exit(&port->fp_mutex);
2310 
2311 			/*
2312 			 * NOTE: Since we just dropped the mutex, there is now
2313 			 * a race window where the fp_soft_state check above
2314 			 * could change here.  This race is covered because an
2315 			 * additional check was added in the functions hidden
2316 			 * under fp_startup_done().
2317 			 */
2318 			if (do_attach_ulps == B_TRUE) {
2319 				/*
2320 				 * This goes thru a bit of a convoluted call
2321 				 * chain before spawning off a DDI taskq
2322 				 * request to perform the actual attach
2323 				 * operations. Blocking can occur at a number
2324 				 * of points.
2325 				 */
2326 				fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2327 			}
2328 			job->job_result = FC_SUCCESS;
2329 			fctl_jobdone(job);
2330 			break;
2331 		}
2332 
2333 		case JOB_ULP_NOTIFY: {
2334 			/*
2335 			 * Pass state change notifications up to any/all
2336 			 * registered ULPs.
2337 			 */
2338 			uint32_t statec;
2339 
2340 			statec = job->job_ulp_listlen;
2341 			if (statec == FC_STATE_RESET_REQUESTED) {
2342 				port->fp_last_task = port->fp_task;
2343 				port->fp_task = FP_TASK_OFFLINE;
2344 				fp_port_offline(port, 0);
2345 				port->fp_task = port->fp_last_task;
2346 				port->fp_last_task = FP_TASK_IDLE;
2347 			}
2348 
2349 			if (--port->fp_statec_busy == 0) {
2350 				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2351 			}
2352 
2353 			mutex_exit(&port->fp_mutex);
2354 
2355 			job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2356 			fctl_jobdone(job);
2357 			break;
2358 		}
2359 
2360 		case JOB_PLOGI_ONE:
2361 			/*
2362 			 * Issue a PLOGI to a single remote port. Multiple
2363 			 * PLOGIs to different remote ports may occur in
2364 			 * parallel.
2365 			 * This can create the fc_remote_port_t if it does not
2366 			 * already exist.
2367 			 */
2368 
2369 			mutex_exit(&port->fp_mutex);
2370 			d_id = (uint32_t *)job->job_private;
2371 			pd = fctl_get_remote_port_by_did(port, *d_id);
2372 
2373 			if (pd) {
2374 				mutex_enter(&pd->pd_mutex);
2375 				if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2376 					pd->pd_login_count++;
2377 					mutex_exit(&pd->pd_mutex);
2378 					job->job_result = FC_SUCCESS;
2379 					fctl_jobdone(job);
2380 					break;
2381 				}
2382 				mutex_exit(&pd->pd_mutex);
2383 			} else {
2384 				mutex_enter(&port->fp_mutex);
2385 				if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2386 					mutex_exit(&port->fp_mutex);
2387 					pd = fp_create_remote_port_by_ns(port,
2388 					    *d_id, KM_SLEEP);
2389 					if (pd == NULL) {
2390 						job->job_result = FC_FAILURE;
2391 						fctl_jobdone(job);
2392 						break;
2393 					}
2394 				} else {
2395 					mutex_exit(&port->fp_mutex);
2396 				}
2397 			}
2398 
2399 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2400 			job->job_counter = 1;
2401 
2402 			rval = fp_port_login(port, *d_id, job,
2403 			    FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2404 
2405 			if (rval != FC_SUCCESS) {
2406 				job->job_result = rval;
2407 				fctl_jobdone(job);
2408 			}
2409 			break;
2410 
2411 		case JOB_LOGO_ONE: {
2412 			/*
2413 			 * Issue a PLOGO to a single remote port. Multiple
2414 			 * PLOGOs to different remote ports may occur in
2415 			 * parallel.
2416 			 */
2417 			fc_remote_port_t *pd;
2418 
2419 #ifndef	__lock_lint
2420 			ASSERT(job->job_counter > 0);
2421 #endif
2422 
2423 			pd = (fc_remote_port_t *)job->job_ulp_pkts;
2424 
2425 			mutex_enter(&pd->pd_mutex);
2426 			if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2427 				mutex_exit(&pd->pd_mutex);
2428 				job->job_result = FC_LOGINREQ;
2429 				mutex_exit(&port->fp_mutex);
2430 				fctl_jobdone(job);
2431 				break;
2432 			}
2433 			if (pd->pd_login_count > 1) {
2434 				pd->pd_login_count--;
2435 				mutex_exit(&pd->pd_mutex);
2436 				job->job_result = FC_SUCCESS;
2437 				mutex_exit(&port->fp_mutex);
2438 				fctl_jobdone(job);
2439 				break;
2440 			}
2441 			mutex_exit(&pd->pd_mutex);
2442 			mutex_exit(&port->fp_mutex);
2443 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2444 			(void) fp_logout(port, pd, job);
2445 			break;
2446 		}
2447 
2448 		case JOB_FCIO_LOGIN:
2449 			/*
2450 			 * PLOGI initiated at ioctl request.
2451 			 */
2452 			mutex_exit(&port->fp_mutex);
2453 			job->job_result =
2454 			    fp_fcio_login(port, job->job_private, job);
2455 			fctl_jobdone(job);
2456 			break;
2457 
2458 		case JOB_FCIO_LOGOUT:
2459 			/*
2460 			 * PLOGO initiated at ioctl request.
2461 			 */
2462 			mutex_exit(&port->fp_mutex);
2463 			job->job_result =
2464 			    fp_fcio_logout(port, job->job_private, job);
2465 			fctl_jobdone(job);
2466 			break;
2467 
2468 		case JOB_PORT_GETMAP:
2469 		case JOB_PORT_GETMAP_PLOGI_ALL: {
2470 			port->fp_last_task = port->fp_task;
2471 			port->fp_task = FP_TASK_GETMAP;
2472 
2473 			switch (port->fp_topology) {
2474 			case FC_TOP_PRIVATE_LOOP:
2475 				job->job_counter = 1;
2476 
2477 				fp_get_loopmap(port, job);
2478 				mutex_exit(&port->fp_mutex);
2479 				fp_jobwait(job);
2480 				fctl_fillout_map(port,
2481 				    (fc_portmap_t **)job->job_private,
2482 				    (uint32_t *)job->job_arg, 1, 0, 0);
2483 				fctl_jobdone(job);
2484 				mutex_enter(&port->fp_mutex);
2485 				break;
2486 
2487 			case FC_TOP_PUBLIC_LOOP:
2488 			case FC_TOP_FABRIC:
2489 				mutex_exit(&port->fp_mutex);
2490 				job->job_counter = 1;
2491 
2492 				job->job_result = fp_ns_getmap(port,
2493 				    job, (fc_portmap_t **)job->job_private,
2494 				    (uint32_t *)job->job_arg,
2495 				    FCTL_GAN_START_ID);
2496 				fctl_jobdone(job);
2497 				mutex_enter(&port->fp_mutex);
2498 				break;
2499 
2500 			case FC_TOP_PT_PT:
2501 				mutex_exit(&port->fp_mutex);
2502 				fctl_fillout_map(port,
2503 				    (fc_portmap_t **)job->job_private,
2504 				    (uint32_t *)job->job_arg, 1, 0, 0);
2505 				fctl_jobdone(job);
2506 				mutex_enter(&port->fp_mutex);
2507 				break;
2508 
2509 			default:
2510 				mutex_exit(&port->fp_mutex);
2511 				fctl_jobdone(job);
2512 				mutex_enter(&port->fp_mutex);
2513 				break;
2514 			}
2515 			port->fp_task = port->fp_last_task;
2516 			port->fp_last_task = FP_TASK_IDLE;
2517 			mutex_exit(&port->fp_mutex);
2518 			break;
2519 		}
2520 
2521 		case JOB_PORT_OFFLINE: {
2522 			fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2523 
2524 			port->fp_last_task = port->fp_task;
2525 			port->fp_task = FP_TASK_OFFLINE;
2526 
2527 			if (port->fp_statec_busy > 2) {
2528 				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2529 				fp_port_offline(port, 0);
2530 				if (--port->fp_statec_busy == 0) {
2531 					port->fp_soft_state &=
2532 					    ~FP_SOFT_IN_STATEC_CB;
2533 				}
2534 			} else {
2535 				fp_port_offline(port, 1);
2536 			}
2537 
2538 			port->fp_task = port->fp_last_task;
2539 			port->fp_last_task = FP_TASK_IDLE;
2540 
2541 			mutex_exit(&port->fp_mutex);
2542 
2543 			fctl_jobdone(job);
2544 			break;
2545 		}
2546 
2547 		case JOB_PORT_STARTUP: {
2548 			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2549 				if (port->fp_statec_busy > 1) {
2550 					mutex_exit(&port->fp_mutex);
2551 					break;
2552 				}
2553 				mutex_exit(&port->fp_mutex);
2554 
2555 				FP_TRACE(FP_NHEAD2(9, rval),
2556 				    "Topology discovery failed");
2557 				break;
2558 			}
2559 
2560 			/*
2561 			 * Attempt building device handles in case
2562 			 * of private Loop.
2563 			 */
2564 			if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2565 				job->job_counter = 1;
2566 
2567 				fp_get_loopmap(port, job);
2568 				mutex_exit(&port->fp_mutex);
2569 				fp_jobwait(job);
2570 				mutex_enter(&port->fp_mutex);
2571 				if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2572 					ASSERT(port->fp_total_devices == 0);
2573 					port->fp_total_devices =
2574 					    port->fp_dev_count;
2575 				}
2576 			} else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2577 				/*
2578 				 * Hack to avoid state changes going up early
2579 				 */
2580 				port->fp_statec_busy++;
2581 				port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2582 
2583 				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2584 				fp_fabric_online(port, job);
2585 				job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2586 			}
2587 			mutex_exit(&port->fp_mutex);
2588 			fctl_jobdone(job);
2589 			break;
2590 		}
2591 
2592 		case JOB_PORT_ONLINE: {
2593 			char		*newtop;
2594 			char		*oldtop;
2595 			uint32_t	old_top;
2596 
2597 			fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2598 
2599 			/*
2600 			 * Bail out early if there are a lot of
2601 			 * state changes in the pipeline
2602 			 */
2603 			if (port->fp_statec_busy > 1) {
2604 				--port->fp_statec_busy;
2605 				mutex_exit(&port->fp_mutex);
2606 				fctl_jobdone(job);
2607 				break;
2608 			}
2609 
2610 			switch (old_top = port->fp_topology) {
2611 			case FC_TOP_PRIVATE_LOOP:
2612 				oldtop = "Private Loop";
2613 				break;
2614 
2615 			case FC_TOP_PUBLIC_LOOP:
2616 				oldtop = "Public Loop";
2617 				break;
2618 
2619 			case FC_TOP_PT_PT:
2620 				oldtop = "Point to Point";
2621 				break;
2622 
2623 			case FC_TOP_FABRIC:
2624 				oldtop = "Fabric";
2625 				break;
2626 
2627 			default:
2628 				oldtop = NULL;
2629 				break;
2630 			}
2631 
2632 			port->fp_last_task = port->fp_task;
2633 			port->fp_task = FP_TASK_ONLINE;
2634 
2635 			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2636 
2637 				port->fp_task = port->fp_last_task;
2638 				port->fp_last_task = FP_TASK_IDLE;
2639 
2640 				if (port->fp_statec_busy > 1) {
2641 					--port->fp_statec_busy;
2642 					mutex_exit(&port->fp_mutex);
2643 					break;
2644 				}
2645 
2646 				port->fp_state = FC_STATE_OFFLINE;
2647 
2648 				FP_TRACE(FP_NHEAD2(9, rval),
2649 				    "Topology discovery failed");
2650 
2651 				if (--port->fp_statec_busy == 0) {
2652 					port->fp_soft_state &=
2653 					    ~FP_SOFT_IN_STATEC_CB;
2654 				}
2655 
2656 				if (port->fp_offline_tid == NULL) {
2657 					port->fp_offline_tid =
2658 					    timeout(fp_offline_timeout,
2659 					    (caddr_t)port, fp_offline_ticks);
2660 				}
2661 
2662 				mutex_exit(&port->fp_mutex);
2663 				break;
2664 			}
2665 
2666 			switch (port->fp_topology) {
2667 			case FC_TOP_PRIVATE_LOOP:
2668 				newtop = "Private Loop";
2669 				break;
2670 
2671 			case FC_TOP_PUBLIC_LOOP:
2672 				newtop = "Public Loop";
2673 				break;
2674 
2675 			case FC_TOP_PT_PT:
2676 				newtop = "Point to Point";
2677 				break;
2678 
2679 			case FC_TOP_FABRIC:
2680 				newtop = "Fabric";
2681 				break;
2682 
2683 			default:
2684 				newtop = NULL;
2685 				break;
2686 			}
2687 
2688 			if (oldtop && newtop && strcmp(oldtop, newtop)) {
2689 				fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2690 				    "Change in FC Topology old = %s new = %s",
2691 				    oldtop, newtop);
2692 			}
2693 
2694 			switch (port->fp_topology) {
2695 			case FC_TOP_PRIVATE_LOOP: {
2696 				int orphan = (old_top == FC_TOP_FABRIC ||
2697 				    old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2698 
2699 				mutex_exit(&port->fp_mutex);
2700 				fp_loop_online(port, job, orphan);
2701 				break;
2702 			}
2703 
2704 			case FC_TOP_PUBLIC_LOOP:
2705 				/* FALLTHROUGH */
2706 			case FC_TOP_FABRIC:
2707 				fp_fabric_online(port, job);
2708 				mutex_exit(&port->fp_mutex);
2709 				break;
2710 
2711 			case FC_TOP_PT_PT:
2712 				fp_p2p_online(port, job);
2713 				mutex_exit(&port->fp_mutex);
2714 				break;
2715 
2716 			default:
2717 				if (--port->fp_statec_busy != 0) {
2718 					/*
2719 					 * Watch curiously at what the next
2720 					 * state transition can do.
2721 					 */
2722 					mutex_exit(&port->fp_mutex);
2723 					break;
2724 				}
2725 
2726 				FP_TRACE(FP_NHEAD2(9, 0),
2727 				    "Topology Unknown, Offlining the port..");
2728 
2729 				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2730 				port->fp_state = FC_STATE_OFFLINE;
2731 
2732 				if (port->fp_offline_tid == NULL) {
2733 					port->fp_offline_tid =
2734 					    timeout(fp_offline_timeout,
2735 					    (caddr_t)port, fp_offline_ticks);
2736 				}
2737 				mutex_exit(&port->fp_mutex);
2738 				break;
2739 			}
2740 
2741 			mutex_enter(&port->fp_mutex);
2742 
2743 			port->fp_task = port->fp_last_task;
2744 			port->fp_last_task = FP_TASK_IDLE;
2745 
2746 			mutex_exit(&port->fp_mutex);
2747 
2748 			fctl_jobdone(job);
2749 			break;
2750 		}
2751 
2752 		case JOB_PLOGI_GROUP: {
2753 			mutex_exit(&port->fp_mutex);
2754 			fp_plogi_group(port, job);
2755 			break;
2756 		}
2757 
2758 		case JOB_UNSOL_REQUEST: {
2759 			mutex_exit(&port->fp_mutex);
2760 			fp_handle_unsol_buf(port,
2761 			    (fc_unsol_buf_t *)job->job_private, job);
2762 			fctl_dealloc_job(job);
2763 			break;
2764 		}
2765 
2766 		case JOB_NS_CMD: {
2767 			fctl_ns_req_t *ns_cmd;
2768 
2769 			mutex_exit(&port->fp_mutex);
2770 
2771 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2772 			ns_cmd = (fctl_ns_req_t *)job->job_private;
2773 			if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2774 			    ns_cmd->ns_cmd_code > NS_DA_ID) {
2775 				job->job_result = FC_BADCMD;
2776 				fctl_jobdone(job);
2777 				break;
2778 			}
2779 
2780 			if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2781 				if (ns_cmd->ns_pd != NULL) {
2782 					job->job_result = FC_BADOBJECT;
2783 					fctl_jobdone(job);
2784 					break;
2785 				}
2786 
2787 				job->job_counter = 1;
2788 
2789 				rval = fp_ns_reg(port, ns_cmd->ns_pd,
2790 				    ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2791 
2792 				if (rval != FC_SUCCESS) {
2793 					job->job_result = rval;
2794 					fctl_jobdone(job);
2795 				}
2796 				break;
2797 			}
2798 			job->job_result = FC_SUCCESS;
2799 			job->job_counter = 1;
2800 
2801 			rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2802 			if (rval != FC_SUCCESS) {
2803 				fctl_jobdone(job);
2804 			}
2805 			break;
2806 		}
2807 
2808 		case JOB_LINK_RESET: {
2809 			la_wwn_t *pwwn;
2810 			uint32_t topology;
2811 
2812 			pwwn = (la_wwn_t *)job->job_private;
2813 			ASSERT(pwwn != NULL);
2814 
2815 			topology = port->fp_topology;
2816 			mutex_exit(&port->fp_mutex);
2817 
2818 			if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2819 			    topology == FC_TOP_PRIVATE_LOOP) {
2820 				job->job_flags |= JOB_TYPE_FP_ASYNC;
2821 				rval = port->fp_fca_tran->fca_reset(
2822 				    port->fp_fca_handle, FC_FCA_LINK_RESET);
2823 				job->job_result = rval;
2824 				fp_jobdone(job);
2825 			} else {
2826 				ASSERT((job->job_flags &
2827 				    JOB_TYPE_FP_ASYNC) == 0);
2828 
2829 				if (FC_IS_TOP_SWITCH(topology)) {
2830 					rval = fp_remote_lip(port, pwwn,
2831 					    KM_SLEEP, job);
2832 				} else {
2833 					rval = FC_FAILURE;
2834 				}
2835 				if (rval != FC_SUCCESS) {
2836 					job->job_result = rval;
2837 				}
2838 				fctl_jobdone(job);
2839 			}
2840 			break;
2841 		}
2842 
2843 		default:
2844 			mutex_exit(&port->fp_mutex);
2845 			job->job_result = FC_BADCMD;
2846 			fctl_jobdone(job);
2847 			break;
2848 		}
2849 	}
2850 	/* NOTREACHED */
2851 }
2852 
2853 
2854 /*
2855  * Perform FC port bring up initialization
2856  */
2857 static int
2858 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2859 {
2860 	int		rval;
2861 	uint32_t	state;
2862 	uint32_t	src_id;
2863 	fc_lilpmap_t	*lilp_map;
2864 
2865 	ASSERT(MUTEX_HELD(&port->fp_mutex));
2866 	ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2867 
2868 	FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2869 	    " port=%p, job=%p", port, job);
2870 
2871 	port->fp_topology = FC_TOP_UNKNOWN;
2872 	port->fp_port_id.port_id = 0;
2873 	state = FC_PORT_STATE_MASK(port->fp_state);
2874 
2875 	if (state == FC_STATE_OFFLINE) {
2876 		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2877 		job->job_result = FC_OFFLINE;
2878 		mutex_exit(&port->fp_mutex);
2879 		fctl_jobdone(job);
2880 		mutex_enter(&port->fp_mutex);
2881 		return (FC_OFFLINE);
2882 	}
2883 
2884 	if (state == FC_STATE_LOOP) {
2885 		port->fp_port_type.port_type = FC_NS_PORT_NL;
2886 		mutex_exit(&port->fp_mutex);
2887 
2888 		lilp_map = &port->fp_lilp_map;
2889 		if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2890 			job->job_result = FC_FAILURE;
2891 			fctl_jobdone(job);
2892 
2893 			FP_TRACE(FP_NHEAD1(9, rval),
2894 			    "LILP map Invalid or not present");
2895 			mutex_enter(&port->fp_mutex);
2896 			return (FC_FAILURE);
2897 		}
2898 
2899 		if (lilp_map->lilp_length == 0) {
2900 			job->job_result = FC_NO_MAP;
2901 			fctl_jobdone(job);
2902 			fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2903 			    "LILP map length zero");
2904 			mutex_enter(&port->fp_mutex);
2905 			return (FC_NO_MAP);
2906 		}
2907 		src_id = lilp_map->lilp_myalpa & 0xFF;
2908 	} else {
2909 		fc_remote_port_t	*pd;
2910 		fc_fca_pm_t		pm;
2911 		fc_fca_p2p_info_t	p2p_info;
2912 		int			pd_recepient;
2913 
2914 		/*
2915 		 * Get P2P remote port info if possible
2916 		 */
2917 		bzero((caddr_t)&pm, sizeof (pm));
2918 
2919 		pm.pm_cmd_flags = FC_FCA_PM_READ;
2920 		pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2921 		pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2922 		pm.pm_data_buf = (caddr_t)&p2p_info;
2923 
2924 		rval = port->fp_fca_tran->fca_port_manage(
2925 		    port->fp_fca_handle, &pm);
2926 
2927 		if (rval == FC_SUCCESS) {
2928 			port->fp_port_id.port_id = p2p_info.fca_d_id;
2929 			port->fp_port_type.port_type = FC_NS_PORT_N;
2930 			port->fp_topology = FC_TOP_PT_PT;
2931 			port->fp_total_devices = 1;
2932 			pd_recepient = fctl_wwn_cmp(
2933 			    &port->fp_service_params.nport_ww_name,
2934 			    &p2p_info.pwwn) < 0 ?
2935 			    PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2936 			mutex_exit(&port->fp_mutex);
2937 			pd = fctl_create_remote_port(port,
2938 			    &p2p_info.nwwn,
2939 			    &p2p_info.pwwn,
2940 			    p2p_info.d_id,
2941 			    pd_recepient, KM_NOSLEEP);
2942 			FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2943 			    " P2P port=%p pd=%p", port, pd);
2944 			mutex_enter(&port->fp_mutex);
2945 			return (FC_SUCCESS);
2946 		}
2947 		port->fp_port_type.port_type = FC_NS_PORT_N;
2948 		mutex_exit(&port->fp_mutex);
2949 		src_id = 0;
2950 	}
2951 
2952 	job->job_counter = 1;
2953 	job->job_result = FC_SUCCESS;
2954 
2955 	if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2956 	    KM_SLEEP)) != FC_SUCCESS) {
2957 		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2958 		job->job_result = FC_FAILURE;
2959 		fctl_jobdone(job);
2960 
2961 		mutex_enter(&port->fp_mutex);
2962 		if (port->fp_statec_busy <= 1) {
2963 			mutex_exit(&port->fp_mutex);
2964 			fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
2965 			    "Couldn't transport FLOGI");
2966 			mutex_enter(&port->fp_mutex);
2967 		}
2968 		return (FC_FAILURE);
2969 	}
2970 
2971 	fp_jobwait(job);
2972 
2973 	mutex_enter(&port->fp_mutex);
2974 	if (job->job_result == FC_SUCCESS) {
2975 		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2976 			mutex_exit(&port->fp_mutex);
2977 			fp_ns_init(port, job, KM_SLEEP);
2978 			mutex_enter(&port->fp_mutex);
2979 		}
2980 	} else {
2981 		if (state == FC_STATE_LOOP) {
2982 			port->fp_topology = FC_TOP_PRIVATE_LOOP;
2983 			port->fp_port_id.port_id =
2984 			    port->fp_lilp_map.lilp_myalpa & 0xFF;
2985 		}
2986 	}
2987 
2988 	FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
2989 	    port, job);
2990 
2991 	return (FC_SUCCESS);
2992 }
2993 
2994 
2995 /*
2996  * Perform ULP invocations following FC port startup
2997  */
2998 /* ARGSUSED */
2999 static void
3000 fp_startup_done(opaque_t arg, uchar_t result)
3001 {
3002 	fc_local_port_t *port = arg;
3003 
3004 	fp_attach_ulps(port, FC_CMD_ATTACH);
3005 
3006 	FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3007 }
3008 
3009 
3010 /*
3011  * Perform ULP port attach
3012  */
3013 static void
3014 fp_ulp_port_attach(void *arg)
3015 {
3016 	fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3017 	fc_local_port_t	 *port = att->att_port;
3018 
3019 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3020 	    " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3021 
3022 	fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3023 
3024 	if (att->att_need_pm_idle == B_TRUE) {
3025 		fctl_idle_port(port);
3026 	}
3027 
3028 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3029 	    " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3030 
3031 	mutex_enter(&att->att_port->fp_mutex);
3032 	att->att_port->fp_ulp_attach = 0;
3033 
3034 	port->fp_task = port->fp_last_task;
3035 	port->fp_last_task = FP_TASK_IDLE;
3036 
3037 	cv_signal(&att->att_port->fp_attach_cv);
3038 
3039 	mutex_exit(&att->att_port->fp_mutex);
3040 
3041 	kmem_free(att, sizeof (fp_soft_attach_t));
3042 }
3043 
3044 /*
3045  * Entry point to funnel all requests down to FCAs
3046  */
3047 static int
3048 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3049 {
3050 	int rval;
3051 
3052 	mutex_enter(&port->fp_mutex);
3053 	if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3054 	    (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3055 	    FC_STATE_OFFLINE))) {
3056 		/*
3057 		 * This means there is more than one state change
3058 		 * at this point of time - Since they are processed
3059 		 * serially, any processing of the current one should
3060 		 * be failed, failed and move up in processing the next
3061 		 */
3062 		cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3063 		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3064 		if (cmd->cmd_job) {
3065 			/*
3066 			 * A state change that is going to be invalidated
3067 			 * by another one already in the port driver's queue
3068 			 * need not go up to all ULPs. This will minimize
3069 			 * needless processing and ripples in ULP modules
3070 			 */
3071 			cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3072 		}
3073 		mutex_exit(&port->fp_mutex);
3074 		return (FC_STATEC_BUSY);
3075 	}
3076 
3077 	if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3078 		cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3079 		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3080 		mutex_exit(&port->fp_mutex);
3081 
3082 		return (FC_OFFLINE);
3083 	}
3084 	mutex_exit(&port->fp_mutex);
3085 
3086 	rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3087 	if (rval != FC_SUCCESS) {
3088 		if (rval == FC_TRAN_BUSY) {
3089 			cmd->cmd_retry_interval = fp_retry_delay;
3090 			rval = fp_retry_cmd(&cmd->cmd_pkt);
3091 			if (rval == FC_FAILURE) {
3092 				cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3093 			}
3094 		}
3095 	} else {
3096 		mutex_enter(&port->fp_mutex);
3097 		port->fp_out_fpcmds++;
3098 		mutex_exit(&port->fp_mutex);
3099 	}
3100 
3101 	return (rval);
3102 }
3103 
3104 
3105 /*
3106  * Each time a timeout kicks in, walk the wait queue, decrement the
3107  * the retry_interval, when the retry_interval becomes less than
3108  * or equal to zero, re-transport the command: If the re-transport
3109  * fails with BUSY, enqueue the command in the wait queue.
3110  *
3111  * In order to prevent looping forever because of commands enqueued
3112  * from within this function itself, save the current tail pointer
3113  * (in cur_tail) and exit the loop after serving this command.
3114  */
3115 static void
3116 fp_resendcmd(void *port_handle)
3117 {
3118 	int		rval;
3119 	fc_local_port_t	*port;
3120 	fp_cmd_t	*cmd;
3121 	fp_cmd_t	*cur_tail;
3122 
3123 	port = port_handle;
3124 	mutex_enter(&port->fp_mutex);
3125 	cur_tail = port->fp_wait_tail;
3126 	mutex_exit(&port->fp_mutex);
3127 
3128 	while ((cmd = fp_deque_cmd(port)) != NULL) {
3129 		cmd->cmd_retry_interval -= fp_retry_ticker;
3130 		/* Check if we are detaching */
3131 		if (port->fp_soft_state &
3132 		    (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3133 			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3134 			cmd->cmd_pkt.pkt_reason = 0;
3135 			fp_iodone(cmd);
3136 		} else if (cmd->cmd_retry_interval <= 0) {
3137 			rval = cmd->cmd_transport(port->fp_fca_handle,
3138 			    &cmd->cmd_pkt);
3139 
3140 			if (rval != FC_SUCCESS) {
3141 				if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3142 					if (--cmd->cmd_retry_count) {
3143 						fp_enque_cmd(port, cmd);
3144 						if (cmd == cur_tail) {
3145 							break;
3146 						}
3147 						continue;
3148 					}
3149 					cmd->cmd_pkt.pkt_state =
3150 					    FC_PKT_TRAN_BSY;
3151 				} else {
3152 					cmd->cmd_pkt.pkt_state =
3153 					    FC_PKT_TRAN_ERROR;
3154 				}
3155 				cmd->cmd_pkt.pkt_reason = 0;
3156 				fp_iodone(cmd);
3157 			} else {
3158 				mutex_enter(&port->fp_mutex);
3159 				port->fp_out_fpcmds++;
3160 				mutex_exit(&port->fp_mutex);
3161 			}
3162 		} else {
3163 			fp_enque_cmd(port, cmd);
3164 		}
3165 
3166 		if (cmd == cur_tail) {
3167 			break;
3168 		}
3169 	}
3170 
3171 	mutex_enter(&port->fp_mutex);
3172 	if (port->fp_wait_head) {
3173 		timeout_id_t tid;
3174 
3175 		mutex_exit(&port->fp_mutex);
3176 		tid = timeout(fp_resendcmd, (caddr_t)port,
3177 		    fp_retry_ticks);
3178 		mutex_enter(&port->fp_mutex);
3179 		port->fp_wait_tid = tid;
3180 	} else {
3181 		port->fp_wait_tid = NULL;
3182 	}
3183 	mutex_exit(&port->fp_mutex);
3184 }
3185 
3186 
3187 /*
3188  * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3189  *
3190  * Yes, as you can see below, cmd_retry_count is used here too.	 That means
3191  * the retries for BUSY are less if there were transport failures (transport
3192  * failure means fca_transport failure). The goal is not to exceed overall
3193  * retries set in the cmd_retry_count (whatever may be the reason for retry)
3194  *
3195  * Return Values:
3196  *	FC_SUCCESS
3197  *	FC_FAILURE
3198  */
3199 static int
3200 fp_retry_cmd(fc_packet_t *pkt)
3201 {
3202 	fp_cmd_t *cmd;
3203 
3204 	cmd = pkt->pkt_ulp_private;
3205 
3206 	if (--cmd->cmd_retry_count) {
3207 		fp_enque_cmd(cmd->cmd_port, cmd);
3208 		return (FC_SUCCESS);
3209 	} else {
3210 		return (FC_FAILURE);
3211 	}
3212 }
3213 
3214 
3215 /*
3216  * Queue up FC packet for deferred retry
3217  */
3218 static void
3219 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
3220 {
3221 	timeout_id_t tid;
3222 
3223 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3224 
3225 #ifdef	DEBUG
3226 	fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
3227 	    "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
3228 #endif
3229 
3230 	mutex_enter(&port->fp_mutex);
3231 	if (port->fp_wait_tail) {
3232 		port->fp_wait_tail->cmd_next = cmd;
3233 		port->fp_wait_tail = cmd;
3234 	} else {
3235 		ASSERT(port->fp_wait_head == NULL);
3236 		port->fp_wait_head = port->fp_wait_tail = cmd;
3237 		if (port->fp_wait_tid == NULL) {
3238 			mutex_exit(&port->fp_mutex);
3239 			tid = timeout(fp_resendcmd, (caddr_t)port,
3240 			    fp_retry_ticks);
3241 			mutex_enter(&port->fp_mutex);
3242 			port->fp_wait_tid = tid;
3243 		}
3244 	}
3245 	mutex_exit(&port->fp_mutex);
3246 }
3247 
3248 
3249 /*
3250  * Handle all RJT codes
3251  */
3252 static int
3253 fp_handle_reject(fc_packet_t *pkt)
3254 {
3255 	int		rval = FC_FAILURE;
3256 	uchar_t		next_class;
3257 	fp_cmd_t	*cmd;
3258 	fc_local_port_t *port;
3259 
3260 	cmd = pkt->pkt_ulp_private;
3261 	port = cmd->cmd_port;
3262 
3263 	switch (pkt->pkt_state) {
3264 	case FC_PKT_FABRIC_RJT:
3265 	case FC_PKT_NPORT_RJT:
3266 		if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
3267 			next_class = fp_get_nextclass(cmd->cmd_port,
3268 			    FC_TRAN_CLASS(pkt->pkt_tran_flags));
3269 
3270 			if (next_class == FC_TRAN_CLASS_INVALID) {
3271 				return (rval);
3272 			}
3273 			pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3274 			pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3275 
3276 			rval = fp_sendcmd(cmd->cmd_port, cmd,
3277 			    cmd->cmd_port->fp_fca_handle);
3278 
3279 			if (rval != FC_SUCCESS) {
3280 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
3281 			}
3282 		}
3283 		break;
3284 
3285 	case FC_PKT_LS_RJT:
3286 	case FC_PKT_BA_RJT:
3287 		if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
3288 		    (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
3289 			cmd->cmd_retry_interval = fp_retry_delay;
3290 			rval = fp_retry_cmd(pkt);
3291 		}
3292 		break;
3293 
3294 	case FC_PKT_FS_RJT:
3295 		if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) {
3296 			cmd->cmd_retry_interval = fp_retry_delay;
3297 			rval = fp_retry_cmd(pkt);
3298 		}
3299 		break;
3300 
3301 	case FC_PKT_LOCAL_RJT:
3302 		if (pkt->pkt_reason == FC_REASON_QFULL) {
3303 			cmd->cmd_retry_interval = fp_retry_delay;
3304 			rval = fp_retry_cmd(pkt);
3305 		}
3306 		break;
3307 
3308 	default:
3309 		FP_TRACE(FP_NHEAD1(1, 0),
3310 		    "fp_handle_reject(): Invalid pkt_state");
3311 		break;
3312 	}
3313 
3314 	return (rval);
3315 }
3316 
3317 
3318 /*
3319  * Return the next class of service supported by the FCA
3320  */
3321 static uchar_t
3322 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3323 {
3324 	uchar_t next_class;
3325 
3326 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3327 
3328 	switch (cur_class) {
3329 	case FC_TRAN_CLASS_INVALID:
3330 		if (port->fp_cos & FC_NS_CLASS1) {
3331 			next_class = FC_TRAN_CLASS1;
3332 			break;
3333 		}
3334 		/* FALLTHROUGH */
3335 
3336 	case FC_TRAN_CLASS1:
3337 		if (port->fp_cos & FC_NS_CLASS2) {
3338 			next_class = FC_TRAN_CLASS2;
3339 			break;
3340 		}
3341 		/* FALLTHROUGH */
3342 
3343 	case FC_TRAN_CLASS2:
3344 		if (port->fp_cos & FC_NS_CLASS3) {
3345 			next_class = FC_TRAN_CLASS3;
3346 			break;
3347 		}
3348 		/* FALLTHROUGH */
3349 
3350 	case FC_TRAN_CLASS3:
3351 	default:
3352 		next_class = FC_TRAN_CLASS_INVALID;
3353 		break;
3354 	}
3355 
3356 	return (next_class);
3357 }
3358 
3359 
3360 /*
3361  * Determine if a class of service is supported by the FCA
3362  */
3363 static int
3364 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3365 {
3366 	int rval;
3367 
3368 	switch (tran_class) {
3369 	case FC_TRAN_CLASS1:
3370 		if (cos & FC_NS_CLASS1) {
3371 			rval = FC_SUCCESS;
3372 		} else {
3373 			rval = FC_FAILURE;
3374 		}
3375 		break;
3376 
3377 	case FC_TRAN_CLASS2:
3378 		if (cos & FC_NS_CLASS2) {
3379 			rval = FC_SUCCESS;
3380 		} else {
3381 			rval = FC_FAILURE;
3382 		}
3383 		break;
3384 
3385 	case FC_TRAN_CLASS3:
3386 		if (cos & FC_NS_CLASS3) {
3387 			rval = FC_SUCCESS;
3388 		} else {
3389 			rval = FC_FAILURE;
3390 		}
3391 		break;
3392 
3393 	default:
3394 		rval = FC_FAILURE;
3395 		break;
3396 	}
3397 
3398 	return (rval);
3399 }
3400 
3401 
3402 /*
3403  * Dequeue FC packet for retry
3404  */
3405 static fp_cmd_t *
3406 fp_deque_cmd(fc_local_port_t *port)
3407 {
3408 	fp_cmd_t *cmd;
3409 
3410 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3411 
3412 	mutex_enter(&port->fp_mutex);
3413 
3414 	if (port->fp_wait_head == NULL) {
3415 		/*
3416 		 * To avoid races, NULL the fp_wait_tid as
3417 		 * we are about to exit the timeout thread.
3418 		 */
3419 		port->fp_wait_tid = NULL;
3420 		mutex_exit(&port->fp_mutex);
3421 		return (NULL);
3422 	}
3423 
3424 	cmd = port->fp_wait_head;
3425 	port->fp_wait_head = cmd->cmd_next;
3426 	cmd->cmd_next = NULL;
3427 
3428 	if (port->fp_wait_head == NULL) {
3429 		port->fp_wait_tail = NULL;
3430 	}
3431 	mutex_exit(&port->fp_mutex);
3432 
3433 	return (cmd);
3434 }
3435 
3436 
3437 /*
3438  * Wait for job completion
3439  */
3440 static void
3441 fp_jobwait(job_request_t *job)
3442 {
3443 	sema_p(&job->job_port_sema);
3444 }
3445 
3446 
3447 /*
3448  * Convert FC packet state to FC errno
3449  */
3450 int
3451 fp_state_to_rval(uchar_t state)
3452 {
3453 	int count;
3454 
3455 	for (count = 0; count < sizeof (fp_xlat) /
3456 	    sizeof (fp_xlat[0]); count++) {
3457 		if (fp_xlat[count].xlat_state == state) {
3458 			return (fp_xlat[count].xlat_rval);
3459 		}
3460 	}
3461 
3462 	return (FC_FAILURE);
3463 }
3464 
3465 
3466 /*
3467  * For Synchronous I/O requests, the caller is
3468  * expected to do fctl_jobdone(if necessary)
3469  *
3470  * We want to preserve at least one failure in the
3471  * job_result if it happens.
3472  *
3473  */
3474 static void
3475 fp_iodone(fp_cmd_t *cmd)
3476 {
3477 	fc_packet_t		*ulp_pkt = cmd->cmd_ulp_pkt;
3478 	job_request_t		*job = cmd->cmd_job;
3479 	fc_remote_port_t	*pd = cmd->cmd_pkt.pkt_pd;
3480 
3481 	ASSERT(job != NULL);
3482 	ASSERT(cmd->cmd_port != NULL);
3483 	ASSERT(&cmd->cmd_pkt != NULL);
3484 
3485 	mutex_enter(&job->job_mutex);
3486 	if (job->job_result == FC_SUCCESS) {
3487 		job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3488 	}
3489 	mutex_exit(&job->job_mutex);
3490 
3491 	if (pd) {
3492 		mutex_enter(&pd->pd_mutex);
3493 		pd->pd_flags = PD_IDLE;
3494 		mutex_exit(&pd->pd_mutex);
3495 	}
3496 
3497 	if (ulp_pkt) {
3498 		if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3499 		    FP_IS_PKT_ERROR(ulp_pkt)) {
3500 			fc_local_port_t		*port;
3501 			fc_remote_node_t	*node;
3502 
3503 			port = cmd->cmd_port;
3504 
3505 			mutex_enter(&pd->pd_mutex);
3506 			pd->pd_state = PORT_DEVICE_INVALID;
3507 			pd->pd_ref_count--;
3508 			node = pd->pd_remote_nodep;
3509 			mutex_exit(&pd->pd_mutex);
3510 
3511 			ASSERT(node != NULL);
3512 			ASSERT(port != NULL);
3513 
3514 			if (fctl_destroy_remote_port(port, pd) == 0) {
3515 				fctl_destroy_remote_node(node);
3516 			}
3517 
3518 			ulp_pkt->pkt_pd = NULL;
3519 		}
3520 
3521 		ulp_pkt->pkt_comp(ulp_pkt);
3522 	}
3523 
3524 	fp_free_pkt(cmd);
3525 	fp_jobdone(job);
3526 }
3527 
3528 
3529 /*
3530  * Job completion handler
3531  */
3532 static void
3533 fp_jobdone(job_request_t *job)
3534 {
3535 	mutex_enter(&job->job_mutex);
3536 	ASSERT(job->job_counter > 0);
3537 
3538 	if (--job->job_counter != 0) {
3539 		mutex_exit(&job->job_mutex);
3540 		return;
3541 	}
3542 
3543 	if (job->job_ulp_pkts) {
3544 		ASSERT(job->job_ulp_listlen > 0);
3545 		kmem_free(job->job_ulp_pkts,
3546 		    sizeof (fc_packet_t *) * job->job_ulp_listlen);
3547 	}
3548 
3549 	if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3550 		mutex_exit(&job->job_mutex);
3551 		fctl_jobdone(job);
3552 	} else {
3553 		mutex_exit(&job->job_mutex);
3554 		sema_v(&job->job_port_sema);
3555 	}
3556 }
3557 
3558 
3559 /*
3560  * Try to perform shutdown of a port during a detach. No return
3561  * value since the detach should not fail because the port shutdown
3562  * failed.
3563  */
3564 static void
3565 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3566 {
3567 	int			index;
3568 	int			count;
3569 	int			flags;
3570 	fp_cmd_t		*cmd;
3571 	struct pwwn_hash	*head;
3572 	fc_remote_port_t	*pd;
3573 
3574 	ASSERT(MUTEX_HELD(&port->fp_mutex));
3575 
3576 	job->job_result = FC_SUCCESS;
3577 
3578 	if (port->fp_taskq) {
3579 		/*
3580 		 * We must release the mutex here to ensure that other
3581 		 * potential jobs can complete their processing.  Many
3582 		 * also need this mutex.
3583 		 */
3584 		mutex_exit(&port->fp_mutex);
3585 		taskq_wait(port->fp_taskq);
3586 		mutex_enter(&port->fp_mutex);
3587 	}
3588 
3589 	if (port->fp_offline_tid) {
3590 		timeout_id_t tid;
3591 
3592 		tid = port->fp_offline_tid;
3593 		port->fp_offline_tid = NULL;
3594 		mutex_exit(&port->fp_mutex);
3595 		(void) untimeout(tid);
3596 		mutex_enter(&port->fp_mutex);
3597 	}
3598 
3599 	if (port->fp_wait_tid) {
3600 		timeout_id_t tid;
3601 
3602