1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * NOT a DDI compliant Sun Fibre Channel port driver(fp)
25  *
26  */
27 
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/param.h>
31 #include <sys/errno.h>
32 #include <sys/uio.h>
33 #include <sys/buf.h>
34 #include <sys/modctl.h>
35 #include <sys/open.h>
36 #include <sys/file.h>
37 #include <sys/kmem.h>
38 #include <sys/poll.h>
39 #include <sys/conf.h>
40 #include <sys/thread.h>
41 #include <sys/var.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stat.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/promif.h>
47 #include <sys/nvpair.h>
48 #include <sys/byteorder.h>
49 #include <sys/scsi/scsi.h>
50 #include <sys/fibre-channel/fc.h>
51 #include <sys/fibre-channel/impl/fc_ulpif.h>
52 #include <sys/fibre-channel/impl/fc_fcaif.h>
53 #include <sys/fibre-channel/impl/fctl_private.h>
54 #include <sys/fibre-channel/impl/fc_portif.h>
55 #include <sys/fibre-channel/impl/fp.h>
56 
57 /* These are defined in fctl.c! */
58 extern int did_table_size;
59 extern int pwwn_table_size;
60 
/*
 * Character device entry points for the fp minor nodes ("devctl"/"fc").
 * Only open, close and ioctl are implemented; all data-path entry points
 * are nodev.  D_MP declares the driver fully MT-safe.
 */
static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
81 
/*
 * Driver operations vector.  fp needs no quiesce support
 * (ddi_quiesce_not_needed) and has no bus_ops since it is a leaf of the
 * FCA nexus.
 */
static struct dev_ops fp_ops = {
	DEVO_REV,			/* build revision */
	0,				/* reference count */
	fp_getinfo,			/* getinfo */
	nulldev,			/* identify - Obsoleted */
	nulldev,			/* probe */
	fp_attach,			/* attach */
	fp_detach,			/* detach */
	nodev,				/* reset */
	&fp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	fp_power,			/* power */
	ddi_quiesce_not_needed		/* quiesce */
};
96 
#define	FP_VERSION		"20091123-1.101"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

/* Driver name/version string (also used as the modldrv name below). */
char *fp_version = FP_NAME_VERSION;

/* Loadable-module linkage: fp is a single device driver module. */
static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	/* Rev of the loadable modules system */
	&modldrv,	/* NULL terminated list of */
	NULL		/* Linkage structures */
};
113 
114 
115 
/*
 * Name Server object registration command codes (NS_R* requests).
 * Presumably issued in array order when registering this port's
 * attributes with the fabric name server — confirm against the NS
 * registration code later in this file.
 */
static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};
125 
/*
 * Translation table from an fc_packet_t completion state (xlat_state,
 * FC_PKT_*) to the FC_* return value (xlat_rval) reported to callers.
 */
struct fp_xlat {
	uchar_t	xlat_state;	/* packet completion state (FC_PKT_*) */
	int	xlat_rval;	/* corresponding FC_* return code */
} fp_xlat [] = {
	{ FC_PKT_SUCCESS,	FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP,	FC_FAILURE },
	{ FC_PKT_LOCAL_RJT,	FC_FAILURE },
	{ FC_PKT_NPORT_RJT,	FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT,	FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY,	FC_PBUSY },
	{ FC_PKT_FABRIC_BSY,	FC_FBUSY },
	{ FC_PKT_LS_RJT,	FC_FAILURE },
	{ FC_PKT_BA_RJT,	FC_FAILURE },
	{ FC_PKT_TIMEOUT,	FC_FAILURE },
	{ FC_PKT_TRAN_ERROR,	FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE,	FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE,	FC_OFFLINE }
};
146 
/*
 * Table of the valid AL_PA (Arbitrated Loop Physical Address) values,
 * in ascending order.  Not every 8-bit value is a legal loop address,
 * so loop addresses are validated against this table.
 */
static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};
163 
/*
 * Per-ioctl permission table: maps each supported FCIO command to the
 * open mode it requires.  FP_OPEN allows any open of the device node;
 * FP_EXCL requires the node to be opened exclusively (see fp_open).
 * Presumably consulted by fp_fciocmd before dispatching — confirm
 * against the ioctl dispatch code, which is outside this chunk.
 */
static struct fp_perms {
	uint16_t	fp_ioctl_cmd;	/* FCIO_* command code */
	uchar_t		fp_open_flag;	/* FP_OPEN or FP_EXCL */
} fp_perm_list [] = {
	{ FCIO_GET_NUM_DEVS,		FP_OPEN },
	{ FCIO_GET_DEV_LIST,		FP_OPEN },
	{ FCIO_GET_SYM_PNAME,		FP_OPEN },
	{ FCIO_GET_SYM_NNAME,		FP_OPEN },
	{ FCIO_SET_SYM_PNAME,		FP_EXCL },
	{ FCIO_SET_SYM_NNAME,		FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS,		FP_OPEN },
	{ FCIO_DEV_LOGIN,		FP_EXCL },
	{ FCIO_DEV_LOGOUT,		FP_EXCL },
	{ FCIO_GET_STATE,		FP_OPEN },
	{ FCIO_DEV_REMOVE,		FP_EXCL },
	{ FCIO_GET_FCODE_REV,		FP_OPEN },
	{ FCIO_GET_FW_REV,		FP_OPEN },
	{ FCIO_GET_DUMP_SIZE,		FP_OPEN },
	{ FCIO_FORCE_DUMP,		FP_EXCL },
	{ FCIO_GET_DUMP,		FP_OPEN },
	{ FCIO_GET_TOPOLOGY,		FP_OPEN },
	{ FCIO_RESET_LINK,		FP_EXCL },
	{ FCIO_RESET_HARD,		FP_EXCL },
	{ FCIO_RESET_HARD_CORE,		FP_EXCL },
	{ FCIO_DIAG,			FP_OPEN },
	{ FCIO_NS,			FP_EXCL },
	{ FCIO_DOWNLOAD_FW,		FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE,		FP_EXCL },
	{ FCIO_LINK_STATUS,		FP_OPEN },
	{ FCIO_GET_HOST_PARAMS,		FP_OPEN },
	{ FCIO_GET_NODE_ID,		FP_OPEN },
	{ FCIO_SET_NODE_ID,		FP_EXCL },
	{ FCIO_SEND_NODE_ID,		FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT, FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT, FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
};
210 
/*
 * Power-management component description strings, in the format used
 * for a "pm-components" style description: one named component with
 * level 0 = Port Down and level 1 = Port Up (FP_PM_PORT_DOWN/
 * FP_PM_PORT_UP in fp_power below).
 */
static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};
216 
217 
#ifdef	_LITTLE_ENDIAN
/*
 * Byte-swap every 32-bit word of the object pointed to by x in place,
 * converting it to big-endian (FC wire order).  On big-endian hosts
 * the macro expands to nothing.
 *
 * The do { } while (0) wrapper makes the macro a single statement, so
 * "MAKE_BE_32(x);" is safe inside an unbraced if/else (the former bare
 * { } block followed by ';' would break such callers).
 *
 * NOTE(review): accessing *x through a uint32_t * assumes the object
 * is 4-byte aligned and a whole number of words long — true for the
 * FC header/payload structs this is used on; confirm for new callers.
 */
#define	MAKE_BE_32(x)	do {						\
		uint32_t	*ptr1, i;				\
		ptr1 = (uint32_t *)(x);					\
		for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
			*ptr1 = BE_32(*ptr1);				\
			ptr1++;						\
		}							\
	} while (0)
#else
#define	MAKE_BE_32(x)
#endif
230 
/* Default message verbosity: warnings and fatal messages only. */
static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
/* Driver-wide option bits, copied into each port at attach time. */
static uint32_t fp_options = 0;

/* Seconds to wait for queued jobs to drain during DDI_DETACH. */
static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances.  Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

/* Tick counts derived from the *_ticker second values in fp_attach. */
static clock_t	fp_retry_ticks;
static clock_t	fp_offline_ticks;

static int fp_retry_ticker;			/* seconds; set in fp_attach */
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

/* Trace message log; allocated in _init() and freed in _fini(). */
static int		fp_log_size = FP_LOG_SIZE;
static int		fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t	*fp_logq = NULL;

/* Forward declarations for helpers defined later in this file. */
int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);
262 
263 
264 /*
265  * Perform global initialization
266  */
267 int
268 _init(void)
269 {
270 	int ret;
271 
272 	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
273 	    sizeof (struct fc_local_port), 8)) != 0) {
274 		return (ret);
275 	}
276 
277 	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
278 		ddi_soft_state_fini(&fp_driver_softstate);
279 		return (ret);
280 	}
281 
282 	fp_logq = fc_trace_alloc_logq(fp_log_size);
283 
284 	if ((ret = mod_install(&modlinkage)) != 0) {
285 		fc_trace_free_logq(fp_logq);
286 		ddi_soft_state_fini(&fp_driver_softstate);
287 		scsi_hba_fini(&modlinkage);
288 	}
289 
290 	return (ret);
291 }
292 
293 
294 /*
295  * Prepare for driver unload
296  */
297 int
298 _fini(void)
299 {
300 	int ret;
301 
302 	if ((ret = mod_remove(&modlinkage)) == 0) {
303 		fc_trace_free_logq(fp_logq);
304 		ddi_soft_state_fini(&fp_driver_softstate);
305 		scsi_hba_fini(&modlinkage);
306 	}
307 
308 	return (ret);
309 }
310 
311 
312 /*
313  * Request mod_info() to handle all cases
314  */
315 int
316 _info(struct modinfo *modinfo)
317 {
318 	return (mod_info(&modlinkage, modinfo));
319 }
320 
321 
322 /*
323  * fp_attach:
324  *
325  * The respective cmd handlers take care of performing
326  * ULP related invocations
327  */
328 static int
329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
330 {
331 	int rval;
332 
333 	/*
334 	 * We check the value of fp_offline_ticker at this
335 	 * point. The variable is global for the driver and
336 	 * not specific to an instance.
337 	 *
338 	 * If there is no user-defined value found in /etc/system
339 	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
340 	 * The minimum setting for this offline timeout according
341 	 * to the FC-FS2 standard (Fibre Channel Framing and
342 	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
343 	 *
344 	 * We do not recommend setting the value to less than 10
345 	 * seconds (RA_TOV) or more than 90 seconds. If this
346 	 * variable is greater than 90 seconds then drivers above
347 	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
348 	 */
349 
350 	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
351 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
352 	    FP_OFFLINE_TICKER);
353 
354 	if ((fp_offline_ticker < 10) ||
355 	    (fp_offline_ticker > 90)) {
356 		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
357 		    "%d second(s). This is outside the "
358 		    "recommended range of 10..90 seconds",
359 		    fp_offline_ticker);
360 	}
361 
362 	/*
363 	 * Tick every second when there are commands to retry.
364 	 * It should tick at the least granular value of pkt_timeout
365 	 * (which is one second)
366 	 */
367 	fp_retry_ticker = 1;
368 
369 	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
370 	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
371 
372 	switch (cmd) {
373 	case DDI_ATTACH:
374 		rval = fp_attach_handler(dip);
375 		break;
376 
377 	case DDI_RESUME:
378 		rval = fp_resume_handler(dip);
379 		break;
380 
381 	default:
382 		rval = DDI_FAILURE;
383 		break;
384 	}
385 	return (rval);
386 }
387 
388 
389 /*
390  * fp_detach:
391  *
392  * If a ULP fails to handle cmd request converse of
393  * cmd is invoked for ULPs that previously succeeded
394  * cmd request.
395  */
396 static int
397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
398 {
399 	int			rval = DDI_FAILURE;
400 	fc_local_port_t		*port;
401 	fc_attach_cmd_t		converse;
402 	uint8_t			cnt;
403 
404 	if ((port = ddi_get_soft_state(fp_driver_softstate,
405 	    ddi_get_instance(dip))) == NULL) {
406 		return (DDI_FAILURE);
407 	}
408 
409 	mutex_enter(&port->fp_mutex);
410 
411 	if (port->fp_ulp_attach) {
412 		mutex_exit(&port->fp_mutex);
413 		return (DDI_FAILURE);
414 	}
415 
416 	switch (cmd) {
417 	case DDI_DETACH:
418 		if (port->fp_task != FP_TASK_IDLE) {
419 			mutex_exit(&port->fp_mutex);
420 			return (DDI_FAILURE);
421 		}
422 
423 		/* Let's attempt to quit the job handler gracefully */
424 		port->fp_soft_state |= FP_DETACH_INPROGRESS;
425 
426 		mutex_exit(&port->fp_mutex);
427 		converse = FC_CMD_ATTACH;
428 		if (fctl_detach_ulps(port, FC_CMD_DETACH,
429 		    &modlinkage) != FC_SUCCESS) {
430 			mutex_enter(&port->fp_mutex);
431 			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
432 			mutex_exit(&port->fp_mutex);
433 			rval = DDI_FAILURE;
434 			break;
435 		}
436 
437 		mutex_enter(&port->fp_mutex);
438 		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
439 		    cnt++) {
440 			mutex_exit(&port->fp_mutex);
441 			delay(drv_usectohz(1000000));
442 			mutex_enter(&port->fp_mutex);
443 		}
444 
445 		if (port->fp_job_head) {
446 			mutex_exit(&port->fp_mutex);
447 			rval = DDI_FAILURE;
448 			break;
449 		}
450 		mutex_exit(&port->fp_mutex);
451 
452 		rval = fp_detach_handler(port);
453 		break;
454 
455 	case DDI_SUSPEND:
456 		mutex_exit(&port->fp_mutex);
457 		converse = FC_CMD_RESUME;
458 		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
459 		    &modlinkage) != FC_SUCCESS) {
460 			rval = DDI_FAILURE;
461 			break;
462 		}
463 		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
464 			(void) callb_generic_cpr(&port->fp_cpr_info,
465 			    CB_CODE_CPR_RESUME);
466 		}
467 		break;
468 
469 	default:
470 		mutex_exit(&port->fp_mutex);
471 		break;
472 	}
473 
474 	/*
475 	 * Use softint to perform reattach.  Mark fp_ulp_attach so we
476 	 * don't attempt to do this repeatedly on behalf of some persistent
477 	 * caller.
478 	 */
479 	if (rval != DDI_SUCCESS) {
480 		mutex_enter(&port->fp_mutex);
481 		port->fp_ulp_attach = 1;
482 
483 		/*
484 		 * If the port is in the low power mode then there is
485 		 * possibility that fca too could be in low power mode.
486 		 * Try to raise the power before calling attach ulps.
487 		 */
488 
489 		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
490 		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
491 			mutex_exit(&port->fp_mutex);
492 			(void) pm_raise_power(port->fp_port_dip,
493 			    FP_PM_COMPONENT, FP_PM_PORT_UP);
494 		} else {
495 			mutex_exit(&port->fp_mutex);
496 		}
497 
498 
499 		fp_attach_ulps(port, converse);
500 
501 		mutex_enter(&port->fp_mutex);
502 		while (port->fp_ulp_attach) {
503 			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
504 		}
505 
506 		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
507 
508 		/*
509 		 * Mark state as detach failed so asynchronous ULP attach
510 		 * events (downstream, not the ones we're initiating with
511 		 * the call to fp_attach_ulps) are not honored.	 We're
512 		 * really still in pending detach.
513 		 */
514 		port->fp_soft_state |= FP_DETACH_FAILED;
515 
516 		mutex_exit(&port->fp_mutex);
517 	}
518 
519 	return (rval);
520 }
521 
522 
523 /*
524  * fp_getinfo:
525  *   Given the device number, return either the
526  *   dev_info_t pointer or the instance number.
527  */
528 
529 /* ARGSUSED */
530 static int
531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
532 {
533 	int		rval;
534 	minor_t		instance;
535 	fc_local_port_t *port;
536 
537 	rval = DDI_SUCCESS;
538 	instance = getminor((dev_t)arg);
539 
540 	switch (cmd) {
541 	case DDI_INFO_DEVT2DEVINFO:
542 		if ((port = ddi_get_soft_state(fp_driver_softstate,
543 		    instance)) == NULL) {
544 			rval = DDI_FAILURE;
545 			break;
546 		}
547 		*result = (void *)port->fp_port_dip;
548 		break;
549 
550 	case DDI_INFO_DEVT2INSTANCE:
551 		*result = (void *)(uintptr_t)instance;
552 		break;
553 
554 	default:
555 		rval = DDI_FAILURE;
556 		break;
557 	}
558 
559 	return (rval);
560 }
561 
562 
563 /*
564  * Entry point for power up and power down request from kernel
565  */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;

	/* fp exposes exactly one PM component (FP_PM_COMPONENT). */
	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		/* Only do real power-up work if we were powered down. */
		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				/* Power-up failed; restore the old level. */
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * The PM framework goofed up: we don't have
			 * any PM components, so never power down.
			 */
			mutex_exit(&port->fp_mutex);
			break;

		}

		if (port->fp_ulp_attach) {
			/* We shouldn't let the power go down */
			mutex_exit(&port->fp_mutex);
			break;
		}

		/*
		 * Not a whole lot to do if we are detaching
		 */
		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
			port->fp_pm_level = FP_PM_PORT_DOWN;
			mutex_exit(&port->fp_mutex);
			rval = DDI_SUCCESS;
			break;
		}

		/* Refuse to power down while anything holds the port busy. */
		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
			port->fp_pm_level = FP_PM_PORT_DOWN;

			rval = fp_power_down(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_UP;
				ASSERT(!(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN));
			} else {
				ASSERT(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN);
			}
		}
		mutex_exit(&port->fp_mutex);
		break;

	default:
		/* Unknown power level: reject (rval stays DDI_FAILURE). */
		break;
	}

	return (rval);
}
660 
661 
662 /*
663  * Open FC port devctl node
664  */
665 static int
666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
667 {
668 	int		instance;
669 	fc_local_port_t *port;
670 
671 	if (otype != OTYP_CHR) {
672 		return (EINVAL);
673 	}
674 
675 	/*
676 	 * This is not a toy to play with. Allow only powerful
677 	 * users (hopefully knowledgeable) to access the port
678 	 * (A hacker potentially could download a sick binary
679 	 * file into FCA)
680 	 */
681 	if (drv_priv(credp)) {
682 		return (EPERM);
683 	}
684 
685 	instance = (int)getminor(*devp);
686 
687 	port = ddi_get_soft_state(fp_driver_softstate, instance);
688 	if (port == NULL) {
689 		return (ENXIO);
690 	}
691 
692 	mutex_enter(&port->fp_mutex);
693 	if (port->fp_flag & FP_EXCL) {
694 		/*
695 		 * It is already open for exclusive access.
696 		 * So shut the door on this caller.
697 		 */
698 		mutex_exit(&port->fp_mutex);
699 		return (EBUSY);
700 	}
701 
702 	if (flag & FEXCL) {
703 		if (port->fp_flag & FP_OPEN) {
704 			/*
705 			 * Exclusive operation not possible
706 			 * as it is already opened
707 			 */
708 			mutex_exit(&port->fp_mutex);
709 			return (EBUSY);
710 		}
711 		port->fp_flag |= FP_EXCL;
712 	}
713 	port->fp_flag |= FP_OPEN;
714 	mutex_exit(&port->fp_mutex);
715 
716 	return (0);
717 }
718 
719 
720 /*
721  * The driver close entry point is called on the last close()
722  * of a device. So it is perfectly alright to just clobber the
723  * open flag and reset it to idle (instead of having to reset
724  * each flag bits). For any confusion, check out close(9E).
725  */
726 
727 /* ARGSUSED */
728 static int
729 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
730 {
731 	int		instance;
732 	fc_local_port_t *port;
733 
734 	if (otype != OTYP_CHR) {
735 		return (EINVAL);
736 	}
737 
738 	instance = (int)getminor(dev);
739 
740 	port = ddi_get_soft_state(fp_driver_softstate, instance);
741 	if (port == NULL) {
742 		return (ENXIO);
743 	}
744 
745 	mutex_enter(&port->fp_mutex);
746 	if ((port->fp_flag & FP_OPEN) == 0) {
747 		mutex_exit(&port->fp_mutex);
748 		return (ENODEV);
749 	}
750 	port->fp_flag = FP_IDLE;
751 	mutex_exit(&port->fp_mutex);
752 
753 	return (0);
754 }
755 
756 /*
757  * Handle IOCTL requests
758  */
759 
/* ARGSUSED */
static int
fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
{
	int		instance;
	int		ret = 0;
	fcio_t		fcio;
	fc_local_port_t *port;

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	/* The node must be open and the port must not be suspended. */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	mutex_exit(&port->fp_mutex);

	/* this will raise power if necessary */
	ret = fctl_busy_port(port);
	if (ret != 0) {
		return (ret);
	}

	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);


	switch (cmd) {
	case FCIO_CMD: {
		/*
		 * Copy in the fcio_t request.  Under _MULTI_DATAMODEL a
		 * 32-bit caller's struct fcio32 is widened field by
		 * field into the native fcio_t (buffer pointers are
		 * widened via uintptr_t).
		 */
#ifdef	_MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32: {
			struct fcio32 fcio32;

			if (ddi_copyin((void *)data, (void *)&fcio32,
			    sizeof (struct fcio32), mode)) {
				ret = EFAULT;
				break;
			}
			fcio.fcio_xfer = fcio32.fcio_xfer;
			fcio.fcio_cmd = fcio32.fcio_cmd;
			fcio.fcio_flags = fcio32.fcio_flags;
			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
			fcio.fcio_ibuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
			fcio.fcio_obuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_obuf;
			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
			fcio.fcio_abuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_abuf;
			fcio.fcio_errno = fcio32.fcio_errno;
			break;
		}

		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)data, (void *)&fcio,
			    sizeof (fcio_t), mode)) {
				ret = EFAULT;
			}
			break;
		}
#else	/* _MULTI_DATAMODEL */
		if (ddi_copyin((void *)data, (void *)&fcio,
		    sizeof (fcio_t), mode)) {
			ret = EFAULT;
			break;
		}
#endif	/* _MULTI_DATAMODEL */
		/* Dispatch only if the copyin above succeeded. */
		if (!ret) {
			ret = fp_fciocmd(port, data, mode, &fcio);
		}
		break;
	}

	default:
		/* Everything else is forwarded to the ULP ioctl path. */
		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
		    mode, credp, rval);
	}

	/* Balance the fctl_busy_port() above (may lower power again). */
	fctl_idle_port(port);

	return (ret);
}
856 
857 
858 /*
859  * Init Symbolic Port Name and Node Name
860  * LV will try to get symbolic names from FCA driver
861  * and register these to name server,
862  * if LV fails to get these,
863  * LV will register its default symbolic names to name server.
864  * The Default symbolic node name format is :
865  *	<hostname>:<hba driver name>(instance)
866  * The Default symbolic port name format is :
867  *	<fp path name>
868  */
869 static void
870 fp_init_symbolic_names(fc_local_port_t *port)
871 {
872 	const char *vendorname = ddi_driver_name(port->fp_fca_dip);
873 	char *sym_name;
874 	char fcaname[50] = {0};
875 	int hostnlen, fcanlen;
876 
877 	if (port->fp_sym_node_namelen == 0) {
878 		hostnlen = strlen(utsname.nodename);
879 		(void) snprintf(fcaname, sizeof (fcaname),
880 		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
881 		fcanlen = strlen(fcaname);
882 
883 		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
884 		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
885 		port->fp_sym_node_namelen = strlen(sym_name);
886 		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
887 			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
888 		}
889 		(void) strncpy(port->fp_sym_node_name, sym_name,
890 		    port->fp_sym_node_namelen);
891 		kmem_free(sym_name, hostnlen + fcanlen + 2);
892 	}
893 
894 	if (port->fp_sym_port_namelen == 0) {
895 		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
896 
897 		(void) ddi_pathname(port->fp_port_dip, pathname);
898 		port->fp_sym_port_namelen = strlen(pathname);
899 		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
900 			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
901 		}
902 		(void) strncpy(port->fp_sym_port_name, pathname,
903 		    port->fp_sym_port_namelen);
904 		kmem_free(pathname, MAXPATHLEN);
905 	}
906 }
907 
908 
909 /*
910  * Perform port attach
911  */
912 static int
913 fp_attach_handler(dev_info_t *dip)
914 {
915 	int			rval;
916 	int			instance;
917 	int			port_num;
918 	int			port_len;
919 	char			name[30];
920 	char			i_pwwn[17];
921 	fp_cmd_t		*pkt;
922 	uint32_t		ub_count;
923 	fc_local_port_t		*port;
924 	job_request_t		*job;
925 	fc_local_port_t *phyport = NULL;
926 	int portpro1;
927 	char pwwn[17], nwwn[17];
928 
929 	instance = ddi_get_instance(dip);
930 	port_len = sizeof (port_num);
931 	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
932 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
933 	    (caddr_t)&port_num, &port_len);
934 	if (rval != DDI_SUCCESS) {
935 		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
936 		    instance);
937 		return (DDI_FAILURE);
938 	}
939 
940 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
941 	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
942 		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
943 		    instance);
944 		return (DDI_FAILURE);
945 	}
946 
947 	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
948 	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
949 		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
950 		    " point minor node", instance);
951 		ddi_remove_minor_node(dip, NULL);
952 		return (DDI_FAILURE);
953 	}
954 
955 	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
956 	    != DDI_SUCCESS) {
957 		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
958 		    instance);
959 		ddi_remove_minor_node(dip, NULL);
960 		return (DDI_FAILURE);
961 	}
962 	port = ddi_get_soft_state(fp_driver_softstate, instance);
963 
964 	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);
965 
966 	port->fp_instance = instance;
967 	port->fp_ulp_attach = 1;
968 	port->fp_port_num = port_num;
969 	port->fp_verbose = fp_verbosity;
970 	port->fp_options = fp_options;
971 
972 	port->fp_fca_dip = ddi_get_parent(dip);
973 	port->fp_port_dip = dip;
974 	port->fp_fca_tran = (fc_fca_tran_t *)
975 	    ddi_get_driver_private(port->fp_fca_dip);
976 
977 	port->fp_task = port->fp_last_task = FP_TASK_IDLE;
978 
979 	/*
980 	 * Init the starting value of fp_rscn_count. Note that if
981 	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
982 	 * actual # of RSCNs will be (fp_rscn_count - 1)
983 	 */
984 	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
985 
986 	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
987 	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
988 	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
989 
990 	(void) sprintf(name, "fp%d_cache", instance);
991 
992 	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
993 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
994 	    "phyport-instance", -1)) != -1) {
995 		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
996 		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
997 		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
998 		port->fp_npiv_type = FC_NPIV_PORT;
999 	}
1000 
1001 	/*
1002 	 * Allocate the pool of fc_packet_t structs to be used with
1003 	 * this fp instance.
1004 	 */
1005 	port->fp_pkt_cache = kmem_cache_create(name,
1006 	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1007 	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1008 	    NULL, 0);
1009 	port->fp_out_fpcmds = 0;
1010 	if (port->fp_pkt_cache == NULL) {
1011 		goto cache_alloc_failed;
1012 	}
1013 
1014 
1015 	/*
1016 	 * Allocate the d_id and pwwn hash tables for all remote ports
1017 	 * connected to this local port.
1018 	 */
1019 	port->fp_did_table = kmem_zalloc(did_table_size *
1020 	    sizeof (struct d_id_hash), KM_SLEEP);
1021 
1022 	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1023 	    sizeof (struct pwwn_hash), KM_SLEEP);
1024 
1025 	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1026 	    MINCLSYSPRI, 1, 16, 0);
1027 
1028 	/* Indicate that don't have the pm components yet */
1029 	port->fp_soft_state |=	FP_SOFT_NO_PMCOMP;
1030 
1031 	/*
1032 	 * Bind the callbacks with the FCA driver. This will open the gate
1033 	 * for asynchronous callbacks, so after this call the fp_mutex
1034 	 * must be held when updating the fc_local_port_t struct.
1035 	 *
1036 	 * This is done _before_ setting up the job thread so we can avoid
1037 	 * cleaning up after the thread_create() in the error path. This
1038 	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1039 	 */
1040 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1041 		goto bind_callbacks_failed;
1042 	}
1043 
1044 	if (phyport) {
1045 		mutex_enter(&phyport->fp_mutex);
1046 		if (phyport->fp_port_next) {
1047 			phyport->fp_port_next->fp_port_prev = port;
1048 			port->fp_port_next =  phyport->fp_port_next;
1049 			phyport->fp_port_next = port;
1050 			port->fp_port_prev = phyport;
1051 		} else {
1052 			phyport->fp_port_next = port;
1053 			phyport->fp_port_prev = port;
1054 			port->fp_port_next =  phyport;
1055 			port->fp_port_prev = phyport;
1056 		}
1057 		mutex_exit(&phyport->fp_mutex);
1058 	}
1059 
1060 	/*
1061 	 * Init Symbolic Names
1062 	 */
1063 	fp_init_symbolic_names(port);
1064 
1065 	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1066 	    KM_SLEEP, NULL);
1067 
1068 	if (pkt == NULL) {
1069 		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1070 		    instance);
1071 		goto alloc_els_packet_failed;
1072 	}
1073 
1074 	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1075 	    v.v_maxsyspri - 2);
1076 
1077 	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1078 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1079 	    i_pwwn) != DDI_PROP_SUCCESS) {
1080 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1081 		    "fp(%d): Updating 'initiator-port' property"
1082 		    " on fp dev_info node failed", instance);
1083 	}
1084 
1085 	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1086 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1087 	    i_pwwn) != DDI_PROP_SUCCESS) {
1088 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1089 		    "fp(%d): Updating 'initiator-node' property"
1090 		    " on fp dev_info node failed", instance);
1091 	}
1092 
1093 	mutex_enter(&port->fp_mutex);
1094 	port->fp_els_resp_pkt = pkt;
1095 	mutex_exit(&port->fp_mutex);
1096 
1097 	/*
1098 	 * Determine the count of unsolicited buffers this FCA can support
1099 	 */
1100 	fp_retrieve_caps(port);
1101 
1102 	/*
1103 	 * Allocate unsolicited buffer tokens
1104 	 */
1105 	if (port->fp_ub_count) {
1106 		ub_count = port->fp_ub_count;
1107 		port->fp_ub_tokens = kmem_zalloc(ub_count *
1108 		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
1109 		/*
1110 		 * Do not fail the attach if unsolicited buffer allocation
1111 		 * fails; Just try to get along with whatever the FCA can do.
1112 		 */
1113 		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1114 		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1115 		    FC_SUCCESS || ub_count != port->fp_ub_count) {
1116 			cmn_err(CE_WARN, "fp(%d): failed to allocate "
1117 			    " Unsolicited buffers. proceeding with attach...",
1118 			    instance);
1119 			kmem_free(port->fp_ub_tokens,
1120 			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1121 			port->fp_ub_tokens = NULL;
1122 		}
1123 	}
1124 
1125 	fp_load_ulp_modules(dip, port);
1126 
1127 	/*
1128 	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1129 	 */
1130 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1131 	    "pm-hardware-state", "needs-suspend-resume",
1132 	    strlen("needs-suspend-resume") + 1);
1133 
1134 	/*
1135 	 * fctl maintains a list of all port handles, so
1136 	 * help fctl add this one to its list now.
1137 	 */
1138 	mutex_enter(&port->fp_mutex);
1139 	fctl_add_port(port);
1140 
1141 	/*
1142 	 * If a state change is already in progress, set the bind state t
1143 	 * OFFLINE as well, so further state change callbacks into ULPs
1144 	 * will pass the appropriate states
1145 	 */
1146 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1147 	    port->fp_statec_busy) {
1148 		port->fp_bind_state = FC_STATE_OFFLINE;
1149 		mutex_exit(&port->fp_mutex);
1150 
1151 		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1152 	} else {
1153 		/*
1154 		 * Without dropping the mutex, ensure that the port
1155 		 * startup happens ahead of state change callback
1156 		 * processing
1157 		 */
1158 		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1159 
1160 		port->fp_last_task = port->fp_task;
1161 		port->fp_task = FP_TASK_PORT_STARTUP;
1162 
1163 		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1164 		    fp_startup_done, (opaque_t)port, KM_SLEEP);
1165 
1166 		port->fp_job_head = port->fp_job_tail = job;
1167 
1168 		cv_signal(&port->fp_cv);
1169 
1170 		mutex_exit(&port->fp_mutex);
1171 	}
1172 
1173 	mutex_enter(&port->fp_mutex);
1174 	while (port->fp_ulp_attach) {
1175 		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1176 	}
1177 	mutex_exit(&port->fp_mutex);
1178 
1179 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1180 	    "pm-components", fp_pm_comps,
1181 	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1182 	    DDI_PROP_SUCCESS) {
1183 		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1184 		    " components property, PM disabled on this port.");
1185 		mutex_enter(&port->fp_mutex);
1186 		port->fp_pm_level = FP_PM_PORT_UP;
1187 		mutex_exit(&port->fp_mutex);
1188 	} else {
1189 		if (pm_raise_power(dip, FP_PM_COMPONENT,
1190 		    FP_PM_PORT_UP) != DDI_SUCCESS) {
1191 			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1192 			    " power level");
1193 			mutex_enter(&port->fp_mutex);
1194 			port->fp_pm_level = FP_PM_PORT_UP;
1195 			mutex_exit(&port->fp_mutex);
1196 		}
1197 
1198 		/*
1199 		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1200 		 * the call to pm_raise_power.	The PM framework can't
1201 		 * handle multiple threads calling into it during attach.
1202 		 */
1203 
1204 		mutex_enter(&port->fp_mutex);
1205 		port->fp_soft_state &=	~FP_SOFT_NO_PMCOMP;
1206 		mutex_exit(&port->fp_mutex);
1207 	}
1208 
1209 	ddi_report_dev(dip);
1210 
1211 	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1212 
1213 	return (DDI_SUCCESS);
1214 
1215 	/*
1216 	 * Unwind any/all preceeding allocations in the event of an error.
1217 	 */
1218 
1219 alloc_els_packet_failed:
1220 
1221 	if (port->fp_fca_handle != NULL) {
1222 		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1223 		port->fp_fca_handle = NULL;
1224 	}
1225 
1226 	if (port->fp_ub_tokens != NULL) {
1227 		(void) fc_ulp_ubfree(port, port->fp_ub_count,
1228 		    port->fp_ub_tokens);
1229 		kmem_free(port->fp_ub_tokens,
1230 		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1231 		port->fp_ub_tokens = NULL;
1232 	}
1233 
1234 	if (port->fp_els_resp_pkt != NULL) {
1235 		fp_free_pkt(port->fp_els_resp_pkt);
1236 		port->fp_els_resp_pkt = NULL;
1237 	}
1238 
1239 bind_callbacks_failed:
1240 
1241 	if (port->fp_taskq != NULL) {
1242 		taskq_destroy(port->fp_taskq);
1243 	}
1244 
1245 	if (port->fp_pwwn_table != NULL) {
1246 		kmem_free(port->fp_pwwn_table,
1247 		    pwwn_table_size * sizeof (struct pwwn_hash));
1248 		port->fp_pwwn_table = NULL;
1249 	}
1250 
1251 	if (port->fp_did_table != NULL) {
1252 		kmem_free(port->fp_did_table,
1253 		    did_table_size * sizeof (struct d_id_hash));
1254 		port->fp_did_table = NULL;
1255 	}
1256 
1257 	if (port->fp_pkt_cache != NULL) {
1258 		kmem_cache_destroy(port->fp_pkt_cache);
1259 		port->fp_pkt_cache = NULL;
1260 	}
1261 
1262 cache_alloc_failed:
1263 
1264 	cv_destroy(&port->fp_attach_cv);
1265 	cv_destroy(&port->fp_cv);
1266 	mutex_destroy(&port->fp_mutex);
1267 	ddi_remove_minor_node(port->fp_port_dip, NULL);
1268 	ddi_soft_state_free(fp_driver_softstate, instance);
1269 	ddi_prop_remove_all(dip);
1270 
1271 	return (DDI_FAILURE);
1272 }
1273 
1274 
1275 /*
1276  * Handle DDI_RESUME request
1277  */
static int
fp_resume_handler(dev_info_t *dip)
{
	int		rval;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	/* fp_suspend_handler() must have marked the port suspended */
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		/* pm_raise_power() may block, so call it without fp_mutex */
		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port thread.
	 * Further, all the discovery is done in callback mode
	 * (not polled mode); In a specific case such as this, the discovery
	 * is required to happen in polled mode. The easiest way out is
	 * to bail out port thread and get started. Come back and fix this
	 * to do on demand discovery initiated by ULPs. ULPs such as FCP
	 * will do on-demand discovery during pre-power-up busctl handling
	 * which will only be possible when SCSA provides a new HBA vector
	 * for sending down the PM busctl requests.
	 */
	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

	rval = fp_resume_all(port, FC_CMD_RESUME);
	if (rval != DDI_SUCCESS) {
		/*
		 * Resume failed: restore the SUSPEND flag and put the CPR
		 * callback state back at the checkpoint.
		 */
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		(void) callb_generic_cpr(&port->fp_cpr_info,
		    CB_CODE_CPR_CHKPT);
	}

	return (rval);
}
1338 
1339 /*
1340  * Perform FC Port power on initialization
1341  */
1342 static int
1343 fp_power_up(fc_local_port_t *port)
1344 {
1345 	int	rval;
1346 
1347 	ASSERT(MUTEX_HELD(&port->fp_mutex));
1348 
1349 	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1350 	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1351 
1352 	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1353 
1354 	mutex_exit(&port->fp_mutex);
1355 
1356 	rval = fp_resume_all(port, FC_CMD_POWER_UP);
1357 	if (rval != DDI_SUCCESS) {
1358 		mutex_enter(&port->fp_mutex);
1359 		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1360 	} else {
1361 		mutex_enter(&port->fp_mutex);
1362 	}
1363 
1364 	return (rval);
1365 }
1366 
1367 
1368 /*
1369  * It is important to note that the power may possibly be removed between
1370  * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1371  * FC port hardware would have gone through an OFFLINE to ONLINE transition
1372  * (hardware state). In this case, the port driver may need to rediscover the
1373  * topology, perform LOGINs, register with the name server again and perform
1374  * any such port initialization procedures. To perform LOGINs, the driver could
1375  * use the port device handle to see if a LOGIN needs to be performed and use
1376  * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1377  * or removed) which will be reflected in the map the ULPs will see.
1378  */
1379 static int
1380 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
1381 {
1382 
1383 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1384 
1385 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1386 		return (DDI_FAILURE);
1387 	}
1388 
1389 	mutex_enter(&port->fp_mutex);
1390 
1391 	/*
1392 	 * If there are commands queued for delayed retry, instead of
1393 	 * working the hard way to figure out which ones are good for
1394 	 * restart and which ones not (ELSs are definitely not good
1395 	 * as the port will have to go through a new spin of rediscovery
1396 	 * now), so just flush them out.
1397 	 */
1398 	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
1399 		fp_cmd_t	*cmd;
1400 
1401 		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;
1402 
1403 		mutex_exit(&port->fp_mutex);
1404 		while ((cmd = fp_deque_cmd(port)) != NULL) {
1405 			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
1406 			fp_iodone(cmd);
1407 		}
1408 		mutex_enter(&port->fp_mutex);
1409 	}
1410 
1411 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
1412 		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
1413 		    port->fp_dev_count) {
1414 			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1415 			port->fp_offline_tid = timeout(fp_offline_timeout,
1416 			    (caddr_t)port, fp_offline_ticks);
1417 		}
1418 		if (port->fp_job_head) {
1419 			cv_signal(&port->fp_cv);
1420 		}
1421 		mutex_exit(&port->fp_mutex);
1422 		fctl_attach_ulps(port, cmd, &modlinkage);
1423 	} else {
1424 		struct job_request *job;
1425 
1426 		/*
1427 		 * If an OFFLINE timer was running at the time of
1428 		 * suspending, there is no need to restart it as
1429 		 * the port is ONLINE now.
1430 		 */
1431 		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1432 		if (port->fp_statec_busy == 0) {
1433 			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
1434 		}
1435 		port->fp_statec_busy++;
1436 		mutex_exit(&port->fp_mutex);
1437 
1438 		job = fctl_alloc_job(JOB_PORT_ONLINE,
1439 		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
1440 		fctl_enque_job(port, job);
1441 
1442 		fctl_jobwait(job);
1443 		fctl_remove_oldies(port);
1444 
1445 		fctl_attach_ulps(port, cmd, &modlinkage);
1446 		fctl_dealloc_job(job);
1447 	}
1448 
1449 	return (DDI_SUCCESS);
1450 }
1451 
1452 
1453 /*
1454  * At this time, there shouldn't be any I/O requests on this port.
1455  * But the unsolicited callbacks from the underlying FCA port need
1456  * to be handled very carefully. The steps followed to handle the
1457  * DDI_DETACH are:
1458  *	+	Grab the port driver mutex, check if the unsolicited
1459  *		callback is currently under processing. If true, fail
1460  *		the DDI_DETACH request by printing a message; If false
1461  *		mark the DDI_DETACH as under progress, so that any
1462  *		further unsolicited callbacks get bounced.
1463  *	+	Perform PRLO/LOGO if necessary, cleanup all the data
1464  *		structures.
1465  *	+	Get the job_handler thread to gracefully exit.
1466  *	+	Unregister callbacks with the FCA port.
1467  *	+	Now that some peace is found, notify all the ULPs of
1468  *		DDI_DETACH request (using ulp_port_detach entry point)
1469  *	+	Free all mutexes, semaphores, conditional variables.
1470  *	+	Free the soft state, return success.
1471  *
1472  * Important considerations:
1473  *		Port driver de-registers state change and unsolicited
1474  *		callbacks before taking up the task of notifying ULPs
1475  *		and performing PRLO and LOGOs.
1476  *
1477  *		A port may go offline at the time PRLO/LOGO is being
1478  *		requested. It is expected of all FCA drivers to fail
1479  *		such requests either immediately with a FC_OFFLINE
1480  *		return code to fc_fca_transport() or return the packet
1481  *		asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1482  */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t	*job;
	uint32_t	delay_count;
	fc_orphan_t	*orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands, because
		 * sd/ssd/scsi_vhci should have finished all their commands,
		 * fcp/fcip/fcsm should have finished all their commands.
		 *
		 * It seems that all fp internal commands are asynchronous now.
		 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	/*
	 * Wait up to 30 seconds (1-second steps, dropping fp_mutex for each
	 * delay) for any in-flight state change or unsolicited callback
	 * to drain.
	 */
	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	/* Commit to the detach; further unsolicited callbacks get bounced */
	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job.  Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us.  In theory the fp driver could even be
	 * modunloaded while the job thread is still finishing up, with
	 * nothing here to prevent that race.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		/* Return unsolicited buffers to the FCA, then free tokens */
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	/* Free the orphan list */
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	/* Tear down synchronization primitives and release the soft state */
	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}
1632 
1633 
1634 /*
1635  * Steps to perform DDI_SUSPEND operation on a FC port
1636  *
1637  *	- If already suspended return DDI_FAILURE
1638  *	- If already power-suspended return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	    in progress, throw a warning message, return DDI_FAILURE
1641  *	- Cancel timeouts
1642  *	- SUSPEND the job_handler thread (means do nothing as it is
1643  *	    taken care of by the CPR frame work)
1644  */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t	delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console)
	 *
	 * Busy waiting for a short interval (500 milliseconds ?) to see
	 * if the callback processing completes may be another idea. Since
	 * most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	/* Wait up to 30 seconds (dropping fp_mutex around each delay) */
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	/* Cancel timeouts and quiesce per-device state (fp_mutex held) */
	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}
1719 
1720 
1721 /*
1722  * Prepare for graceful power down of a FC port
1723  */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Power down request followed by a DDI_SUSPEND should
	 * never happen; If it does return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		/* Roll back the power-down flag */
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		/* Re-acquire fp_mutex; the caller expects it held on return */
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	/* Cancel timeouts and mark remote ports for re-LOGIN at power up */
	fp_suspend_all(port);

	return (DDI_SUCCESS);
}
1795 
1796 
1797 /*
1798  * Suspend the entire FC port
1799  */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int			index;
	struct pwwn_hash	*head;
	fc_remote_port_t	*pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/* Cancel the WAIT timeout; remember to restore it at resume time */
	if (port->fp_wait_tid != 0) {
		timeout_id_t	tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		/* untimeout(9F) may block on the handler; drop fp_mutex */
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	/* Likewise for the OFFLINE timeout */
	if (port->fp_offline_tid) {
		timeout_id_t	tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	/* Unbind from the FCA without holding fp_mutex */
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}
1853 
1854 
1855 /*
1856  * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
1857  * Performs intializations for fc_packet_t structs.
1858  * Returns 0 for success or -1 for failure.
1859  *
1860  * This function allocates DMA handles for both command and responses.
1861  * Most of the ELSs used have both command and responses so it is strongly
1862  * desired to move them to cache constructor routine.
1863  *
1864  * Context: Can sleep iff called with KM_SLEEP flag.
1865  */
1866 static int
1867 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1868 {
1869 	int		(*cb) (caddr_t);
1870 	fc_packet_t	*pkt;
1871 	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
1872 	fc_local_port_t *port = (fc_local_port_t *)cdarg;
1873 
1874 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1875 
1876 	cmd->cmd_next = NULL;
1877 	cmd->cmd_flags = 0;
1878 	cmd->cmd_dflags = 0;
1879 	cmd->cmd_job = NULL;
1880 	cmd->cmd_port = port;
1881 	pkt = &cmd->cmd_pkt;
1882 
1883 	if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
1884 		if (ddi_dma_alloc_handle(port->fp_fca_dip,
1885 		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1886 		    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1887 			return (-1);
1888 		}
1889 
1890 		if (ddi_dma_alloc_handle(port->fp_fca_dip,
1891 		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1892 		    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1893 			ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1894 			return (-1);
1895 		}
1896 	} else {
1897 		pkt->pkt_cmd_dma = 0;
1898 		pkt->pkt_resp_dma = 0;
1899 	}
1900 
1901 	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1902 	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1903 	    pkt->pkt_data_cookie_cnt = 0;
1904 	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1905 	    pkt->pkt_data_cookie = NULL;
1906 	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1907 
1908 	return (0);
1909 }
1910 
1911 
1912 /*
1913  * fp_cache_destructor: Destructor function for kmem_cache_create().
1914  * Performs un-intializations for fc_packet_t structs.
1915  */
1916 /* ARGSUSED */
1917 static void
1918 fp_cache_destructor(void *buf, void *cdarg)
1919 {
1920 	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
1921 	fc_packet_t	*pkt;
1922 
1923 	pkt = &cmd->cmd_pkt;
1924 	if (pkt->pkt_cmd_dma) {
1925 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1926 	}
1927 
1928 	if (pkt->pkt_resp_dma) {
1929 		ddi_dma_free_handle(&pkt->pkt_resp_dma);
1930 	}
1931 }
1932 
1933 
1934 /*
1935  * Packet allocation for ELS and any other port driver commands
1936  *
1937  * Some ELSs like FLOGI and PLOGI are critical for topology and
1938  * device discovery and a system's inability to allocate memory
1939  * or DVMA resources while performing some of these critical ELSs
1940  * cause a lot of problem. While memory allocation failures are
1941  * rare, DVMA resource failures are common as the applications
1942  * are becoming more and more powerful on huge servers.	 So it
1943  * is desirable to have a framework support to reserve a fragment
1944  * of DVMA. So until this is fixed the correct way, the suffering
1945  * is huge whenever a LIP happens at a time DVMA resources are
1946  * drained out completely - So an attempt needs to be made to
1947  * KM_SLEEP while requesting for these resources, hoping that
1948  * the requests won't hang forever.
1949  *
1950  * The fc_remote_port_t argument is stored into the pkt_pd field in the
1951  * fc_packet_t struct prior to the fc_ulp_init_packet() call.  This
1952  * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1953  * If there is no fc_remote_port_t associated with the fc_packet_t, then
1954  * fp_alloc_pkt() must be called with pd set to NULL.
1955  *
 * fp/fctl will reuse fp_cmd_t somewhere, and change pkt_cmdlen/rsplen,
1957  * actually, it's a design fault. But there's no problem for physical
1958  * FCAs. But it will cause memory leak or panic for virtual FCAs like fcoei.
1959  *
1960  * For FCAs that don't support DMA, such as fcoei, we will use
1961  * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
1962  */
1963 
1964 static fp_cmd_t *
1965 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1966     fc_remote_port_t *pd)
1967 {
1968 	int		rval;
1969 	ulong_t		real_len;
1970 	fp_cmd_t	*cmd;
1971 	fc_packet_t	*pkt;
1972 	int		(*cb) (caddr_t);
1973 	ddi_dma_cookie_t	pkt_cookie;
1974 	ddi_dma_cookie_t	*cp;
1975 	uint32_t		cnt;
1976 
1977 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1978 
1979 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1980 
1981 	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1982 	if (cmd == NULL) {
1983 		return (cmd);
1984 	}
1985 
1986 	cmd->cmd_ulp_pkt = NULL;
1987 	cmd->cmd_flags = 0;
1988 	pkt = &cmd->cmd_pkt;
1989 	ASSERT(cmd->cmd_dflags == 0);
1990 
1991 	pkt->pkt_datalen = 0;
1992 	pkt->pkt_data = NULL;
1993 	pkt->pkt_state = 0;
1994 	pkt->pkt_action = 0;
1995 	pkt->pkt_reason = 0;
1996 	pkt->pkt_expln = 0;
1997 	pkt->pkt_cmd = NULL;
1998 	pkt->pkt_resp = NULL;
1999 	pkt->pkt_fctl_rsvd1 = NULL;
2000 	pkt->pkt_fctl_rsvd2 = NULL;
2001 
2002 	/*
2003 	 * Init pkt_pd with the given pointer; this must be done _before_
2004 	 * the call to fc_ulp_init_packet().
2005 	 */
2006 	pkt->pkt_pd = pd;
2007 
2008 	/* Now call the FCA driver to init its private, per-packet fields */
2009 	if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
2010 		goto alloc_pkt_failed;
2011 	}
2012 
2013 	if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2014 		ASSERT(pkt->pkt_cmd_dma != NULL);
2015 
2016 		rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2017 		    port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2018 		    cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2019 		    &pkt->pkt_cmd_acc);
2020 
2021 		if (rval != DDI_SUCCESS) {
2022 			goto alloc_pkt_failed;
2023 		}
2024 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2025 
2026 		if (real_len < cmd_len) {
2027 			goto alloc_pkt_failed;
2028 		}
2029 
2030 		rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2031 		    pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2032 		    DDI_DMA_CONSISTENT, cb, NULL,
2033 		    &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2034 
2035 		if (rval != DDI_DMA_MAPPED) {
2036 			goto alloc_pkt_failed;
2037 		}
2038 
2039 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2040 
2041 		if (pkt->pkt_cmd_cookie_cnt >
2042 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2043 			goto alloc_pkt_failed;
2044 		}
2045 
2046 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2047 
2048 		cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2049 		    pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2050 		    KM_NOSLEEP);
2051 
2052 		if (cp == NULL) {
2053 			goto alloc_pkt_failed;
2054 		}
2055 
2056 		*cp = pkt_cookie;
2057 		cp++;
2058 		for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2059 			ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2060 			*cp = pkt_cookie;
2061 		}
2062 	} else if (cmd_len != 0) {
2063 		pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP);
2064 		pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len;
2065 	}
2066 
2067 	if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2068 		ASSERT(pkt->pkt_resp_dma != NULL);
2069 
2070 		rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2071 		    port->fp_fca_tran->fca_acc_attr,
2072 		    DDI_DMA_CONSISTENT, cb, NULL,
2073 		    (caddr_t *)&pkt->pkt_resp, &real_len,
2074 		    &pkt->pkt_resp_acc);
2075 
2076 		if (rval != DDI_SUCCESS) {
2077 			goto alloc_pkt_failed;
2078 		}
2079 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2080 
2081 		if (real_len < resp_len) {
2082 			goto alloc_pkt_failed;
2083 		}
2084 
2085 		rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2086 		    pkt->pkt_resp, real_len, DDI_DMA_READ |
2087 		    DDI_DMA_CONSISTENT, cb, NULL,
2088 		    &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2089 
2090 		if (rval != DDI_DMA_MAPPED) {
2091 			goto alloc_pkt_failed;
2092 		}
2093 
2094 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2095 
2096 		if (pkt->pkt_resp_cookie_cnt >
2097 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2098 			goto alloc_pkt_failed;
2099 		}
2100 
2101 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2102 
2103 		cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2104 		    pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2105 		    KM_NOSLEEP);
2106 
2107 		if (cp == NULL) {
2108 			goto alloc_pkt_failed;
2109 		}
2110 
2111 		*cp = pkt_cookie;
2112 		cp++;
2113 		for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2114 			ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2115 			*cp = pkt_cookie;
2116 		}
2117 	} else if (resp_len != 0) {
2118 		pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP);
2119 		pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len;
2120 	}
2121 
2122 	pkt->pkt_cmdlen = cmd_len;
2123 	pkt->pkt_rsplen = resp_len;
2124 	pkt->pkt_ulp_private = cmd;
2125 
2126 	return (cmd);
2127 
2128 alloc_pkt_failed:
2129 
2130 	fp_free_dma(cmd);
2131 
2132 	if (pkt->pkt_cmd_cookie != NULL) {
2133 		kmem_free(pkt->pkt_cmd_cookie,
2134 		    pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2135 		pkt->pkt_cmd_cookie = NULL;
2136 	}
2137 
2138 	if (pkt->pkt_resp_cookie != NULL) {
2139 		kmem_free(pkt->pkt_resp_cookie,
2140 		    pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2141 		pkt->pkt_resp_cookie = NULL;
2142 	}
2143 
2144 	if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2145 		if (pkt->pkt_cmd) {
2146 			kmem_free(pkt->pkt_cmd, cmd_len);
2147 		}
2148 
2149 		if (pkt->pkt_resp) {
2150 			kmem_free(pkt->pkt_resp, resp_len);
2151 		}
2152 	}
2153 
2154 	kmem_cache_free(port->fp_pkt_cache, cmd);
2155 
2156 	return (NULL);
2157 }
2158 
2159 
2160 /*
2161  * Free FC packet
2162  */
2163 static void
2164 fp_free_pkt(fp_cmd_t *cmd)
2165 {
2166 	fc_local_port_t *port;
2167 	fc_packet_t	*pkt;
2168 
2169 	ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2170 
2171 	cmd->cmd_next = NULL;
2172 	cmd->cmd_job = NULL;
2173 	pkt = &cmd->cmd_pkt;
2174 	pkt->pkt_ulp_private = 0;
2175 	pkt->pkt_tran_flags = 0;
2176 	pkt->pkt_tran_type = 0;
2177 	port = cmd->cmd_port;
2178 
2179 	if (pkt->pkt_cmd_cookie != NULL) {
2180 		kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2181 		    sizeof (ddi_dma_cookie_t));
2182 		pkt->pkt_cmd_cookie = NULL;
2183 	}
2184 
2185 	if (pkt->pkt_resp_cookie != NULL) {
2186 		kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2187 		    sizeof (ddi_dma_cookie_t));
2188 		pkt->pkt_resp_cookie = NULL;
2189 	}
2190 
2191 	if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2192 		if (pkt->pkt_cmd) {
2193 			kmem_free(pkt->pkt_cmd,
2194 			    (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1);
2195 		}
2196 
2197 		if (pkt->pkt_resp) {
2198 			kmem_free(pkt->pkt_resp,
2199 			    (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2);
2200 		}
2201 	}
2202 
2203 	fp_free_dma(cmd);
2204 	(void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2205 	kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2206 }
2207 
2208 
2209 /*
2210  * Release DVMA resources
2211  */
2212 static void
2213 fp_free_dma(fp_cmd_t *cmd)
2214 {
2215 	fc_packet_t *pkt = &cmd->cmd_pkt;
2216 
2217 	pkt->pkt_cmdlen = 0;
2218 	pkt->pkt_rsplen = 0;
2219 	pkt->pkt_tran_type = 0;
2220 	pkt->pkt_tran_flags = 0;
2221 
2222 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2223 		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2224 	}
2225 
2226 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2227 		if (pkt->pkt_cmd_acc) {
2228 			ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2229 		}
2230 	}
2231 
2232 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2233 		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2234 	}
2235 
2236 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2237 		if (pkt->pkt_resp_acc) {
2238 			ddi_dma_mem_free(&pkt->pkt_resp_acc);
2239 		}
2240 	}
2241 	cmd->cmd_dflags = 0;
2242 }
2243 
2244 
2245 /*
2246  * Dedicated thread to perform various activities.  One thread for
2247  * each fc_local_port_t (driver soft state) instance.
2248  * Note, this effectively works out to one thread for each local
2249  * port, but there are also some Solaris taskq threads in use on a per-local
2250  * port basis; these also need to be taken into consideration.
2251  */
static void
fp_job_handler(fc_local_port_t *port)
{
	int			rval;
	uint32_t		*d_id;
	fc_remote_port_t	*pd;
	job_request_t		*job;

#ifndef	__lock_lint
	/*
	 * Solaris-internal stuff for proper operation of kernel threads
	 * with Solaris CPR.
	 */
	CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
	    callb_generic_cpr, "fp_job_handler");
#endif


	/* Loop forever waiting for work to do */
	for (;;) {

		mutex_enter(&port->fp_mutex);

		/*
		 * Sleep if no work to do right now, or if we want
		 * to suspend or power-down.
		 */
		while (port->fp_job_head == NULL ||
		    (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
		    FP_SOFT_SUSPEND))) {
			CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
			cv_wait(&port->fp_cv, &port->fp_mutex);
			CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
		}

		/*
		 * OK, we've just been woken up, so retrieve the next entry
		 * from the head of the job queue for this local port.
		 */
		job = fctl_deque_job(port);

		/*
		 * Handle all the fp driver's supported job codes here
		 * in this big honkin' switch.
		 *
		 * Note: fp_mutex is still held at the top of each case;
		 * every case below is responsible for dropping it before
		 * looping back around.
		 */
		switch (job->job_code) {
		case JOB_PORT_SHUTDOWN:
			/*
			 * fp_port_shutdown() is only called from here. This
			 * will prepare the local port instance (softstate)
			 * for detaching.  This cancels timeout callbacks,
			 * executes LOGOs with remote ports, cleans up tables,
			 * and deallocates data structs.
			 */
			fp_port_shutdown(port, job);

			/*
			 * This will exit the job thread.
			 */
#ifndef __lock_lint
			CALLB_CPR_EXIT(&(port->fp_cpr_info));
#else
			mutex_exit(&port->fp_mutex);
#endif
			fctl_jobdone(job);
			thread_exit();

			/* NOTREACHED */

		case JOB_ATTACH_ULP: {
			/*
			 * This job is spawned in response to a ULP calling
			 * fc_ulp_add().
			 */

			boolean_t do_attach_ulps = B_TRUE;

			/*
			 * If fp is detaching, we don't want to call
			 * fp_startup_done as this asynchronous
			 * notification may interfere with the re-attach.
			 */

			if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
			    FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
				do_attach_ulps = B_FALSE;
			} else {
				/*
				 * We are going to force the transport
				 * to attach to the ULPs, so set
				 * fp_ulp_attach.  This will keep any
				 * potential detach from occurring until
				 * we are done.
				 */
				port->fp_ulp_attach = 1;
			}

			mutex_exit(&port->fp_mutex);

			/*
			 * NOTE: Since we just dropped the mutex, there is now
			 * a race window where the fp_soft_state check above
			 * could change here.  This race is covered because an
			 * additional check was added in the functions hidden
			 * under fp_startup_done().
			 */
			if (do_attach_ulps == B_TRUE) {
				/*
				 * This goes thru a bit of a convoluted call
				 * chain before spawning off a DDI taskq
				 * request to perform the actual attach
				 * operations. Blocking can occur at a number
				 * of points.
				 */
				fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
			}
			job->job_result = FC_SUCCESS;
			fctl_jobdone(job);
			break;
		}

		case JOB_ULP_NOTIFY: {
			/*
			 * Pass state change notifications up to any/all
			 * registered ULPs.
			 */
			uint32_t statec;

			statec = job->job_ulp_listlen;
			if (statec == FC_STATE_RESET_REQUESTED) {
				port->fp_last_task = port->fp_task;
				port->fp_task = FP_TASK_OFFLINE;
				fp_port_offline(port, 0);
				port->fp_task = port->fp_last_task;
				port->fp_last_task = FP_TASK_IDLE;
			}

			if (--port->fp_statec_busy == 0) {
				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
			}

			mutex_exit(&port->fp_mutex);

			job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
			fctl_jobdone(job);
			break;
		}

		case JOB_PLOGI_ONE:
			/*
			 * Issue a PLOGI to a single remote port. Multiple
			 * PLOGIs to different remote ports may occur in
			 * parallel.
			 * This can create the fc_remote_port_t if it does not
			 * already exist.
			 */

			mutex_exit(&port->fp_mutex);
			d_id = (uint32_t *)job->job_private;
			pd = fctl_get_remote_port_by_did(port, *d_id);

			if (pd) {
				mutex_enter(&pd->pd_mutex);
				if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
					pd->pd_login_count++;
					mutex_exit(&pd->pd_mutex);
					job->job_result = FC_SUCCESS;
					fctl_jobdone(job);
					break;
				}
				mutex_exit(&pd->pd_mutex);
			} else {
				mutex_enter(&port->fp_mutex);
				if (FC_IS_TOP_SWITCH(port->fp_topology)) {
					mutex_exit(&port->fp_mutex);
					pd = fp_create_remote_port_by_ns(port,
					    *d_id, KM_SLEEP);
					if (pd == NULL) {
						job->job_result = FC_FAILURE;
						fctl_jobdone(job);
						break;
					}
				} else {
					mutex_exit(&port->fp_mutex);
				}
			}

			job->job_flags |= JOB_TYPE_FP_ASYNC;
			job->job_counter = 1;

			rval = fp_port_login(port, *d_id, job,
			    FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);

			if (rval != FC_SUCCESS) {
				job->job_result = rval;
				fctl_jobdone(job);
			}
			break;

		case JOB_LOGO_ONE: {
			/*
			 * Issue a PLOGO to a single remote port. Multiple
			 * PLOGOs to different remote ports may occur in
			 * parallel.
			 */
			fc_remote_port_t *pd;

#ifndef	__lock_lint
			ASSERT(job->job_counter > 0);
#endif

			pd = (fc_remote_port_t *)job->job_ulp_pkts;

			mutex_enter(&pd->pd_mutex);
			if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
				mutex_exit(&pd->pd_mutex);
				job->job_result = FC_LOGINREQ;
				mutex_exit(&port->fp_mutex);
				fctl_jobdone(job);
				break;
			}
			if (pd->pd_login_count > 1) {
				pd->pd_login_count--;
				mutex_exit(&pd->pd_mutex);
				job->job_result = FC_SUCCESS;
				mutex_exit(&port->fp_mutex);
				fctl_jobdone(job);
				break;
			}
			mutex_exit(&pd->pd_mutex);
			mutex_exit(&port->fp_mutex);
			job->job_flags |= JOB_TYPE_FP_ASYNC;
			(void) fp_logout(port, pd, job);
			break;
		}

		case JOB_FCIO_LOGIN:
			/*
			 * PLOGI initiated at ioctl request.
			 */
			mutex_exit(&port->fp_mutex);
			job->job_result =
			    fp_fcio_login(port, job->job_private, job);
			fctl_jobdone(job);
			break;

		case JOB_FCIO_LOGOUT:
			/*
			 * PLOGO initiated at ioctl request.
			 */
			mutex_exit(&port->fp_mutex);
			job->job_result =
			    fp_fcio_logout(port, job->job_private, job);
			fctl_jobdone(job);
			break;

		case JOB_PORT_GETMAP:
		case JOB_PORT_GETMAP_PLOGI_ALL: {
			/*
			 * Build a port map for the caller; how that is
			 * done depends on the current topology.
			 */
			port->fp_last_task = port->fp_task;
			port->fp_task = FP_TASK_GETMAP;

			switch (port->fp_topology) {
			case FC_TOP_PRIVATE_LOOP:
				job->job_counter = 1;

				fp_get_loopmap(port, job);
				mutex_exit(&port->fp_mutex);
				fp_jobwait(job);
				fctl_fillout_map(port,
				    (fc_portmap_t **)job->job_private,
				    (uint32_t *)job->job_arg, 1, 0, 0);
				fctl_jobdone(job);
				mutex_enter(&port->fp_mutex);
				break;

			case FC_TOP_PUBLIC_LOOP:
			case FC_TOP_FABRIC:
				mutex_exit(&port->fp_mutex);
				job->job_counter = 1;

				job->job_result = fp_ns_getmap(port,
				    job, (fc_portmap_t **)job->job_private,
				    (uint32_t *)job->job_arg,
				    FCTL_GAN_START_ID);
				fctl_jobdone(job);
				mutex_enter(&port->fp_mutex);
				break;

			case FC_TOP_PT_PT:
				mutex_exit(&port->fp_mutex);
				fctl_fillout_map(port,
				    (fc_portmap_t **)job->job_private,
				    (uint32_t *)job->job_arg, 1, 0, 0);
				fctl_jobdone(job);
				mutex_enter(&port->fp_mutex);
				break;

			default:
				mutex_exit(&port->fp_mutex);
				fctl_jobdone(job);
				mutex_enter(&port->fp_mutex);
				break;
			}
			port->fp_task = port->fp_last_task;
			port->fp_last_task = FP_TASK_IDLE;
			mutex_exit(&port->fp_mutex);
			break;
		}

		case JOB_PORT_OFFLINE: {
			fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);

			port->fp_last_task = port->fp_task;
			port->fp_task = FP_TASK_OFFLINE;

			if (port->fp_statec_busy > 2) {
				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
				fp_port_offline(port, 0);
				if (--port->fp_statec_busy == 0) {
					port->fp_soft_state &=
					    ~FP_SOFT_IN_STATEC_CB;
				}
			} else {
				fp_port_offline(port, 1);
			}

			port->fp_task = port->fp_last_task;
			port->fp_last_task = FP_TASK_IDLE;

			mutex_exit(&port->fp_mutex);

			fctl_jobdone(job);
			break;
		}

		case JOB_PORT_STARTUP: {
			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
				if (port->fp_statec_busy > 1) {
					mutex_exit(&port->fp_mutex);
					break;
				}
				mutex_exit(&port->fp_mutex);

				FP_TRACE(FP_NHEAD2(9, rval),
				    "Topology discovery failed");
				break;
			}

			/*
			 * Attempt building device handles in case
			 * of private Loop.
			 */
			if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
				job->job_counter = 1;

				fp_get_loopmap(port, job);
				mutex_exit(&port->fp_mutex);
				fp_jobwait(job);
				mutex_enter(&port->fp_mutex);
				if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
					ASSERT(port->fp_total_devices == 0);
					port->fp_total_devices =
					    port->fp_dev_count;
				}
			} else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
				/*
				 * Hack to avoid state changes going up early
				 */
				port->fp_statec_busy++;
				port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;

				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
				fp_fabric_online(port, job);
				job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
			}
			mutex_exit(&port->fp_mutex);
			fctl_jobdone(job);
			break;
		}

		case JOB_PORT_ONLINE: {
			char		*newtop;
			char		*oldtop;
			uint32_t	old_top;

			fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);

			/*
			 * Bail out early if there are a lot of
			 * state changes in the pipeline
			 */
			if (port->fp_statec_busy > 1) {
				--port->fp_statec_busy;
				mutex_exit(&port->fp_mutex);
				fctl_jobdone(job);
				break;
			}

			switch (old_top = port->fp_topology) {
			case FC_TOP_PRIVATE_LOOP:
				oldtop = "Private Loop";
				break;

			case FC_TOP_PUBLIC_LOOP:
				oldtop = "Public Loop";
				break;

			case FC_TOP_PT_PT:
				oldtop = "Point to Point";
				break;

			case FC_TOP_FABRIC:
				oldtop = "Fabric";
				break;

			default:
				oldtop = NULL;
				break;
			}

			port->fp_last_task = port->fp_task;
			port->fp_task = FP_TASK_ONLINE;

			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {

				port->fp_task = port->fp_last_task;
				port->fp_last_task = FP_TASK_IDLE;

				if (port->fp_statec_busy > 1) {
					--port->fp_statec_busy;
					mutex_exit(&port->fp_mutex);
					break;
				}

				port->fp_state = FC_STATE_OFFLINE;

				FP_TRACE(FP_NHEAD2(9, rval),
				    "Topology discovery failed");

				if (--port->fp_statec_busy == 0) {
					port->fp_soft_state &=
					    ~FP_SOFT_IN_STATEC_CB;
				}

				if (port->fp_offline_tid == NULL) {
					port->fp_offline_tid =
					    timeout(fp_offline_timeout,
					    (caddr_t)port, fp_offline_ticks);
				}

				mutex_exit(&port->fp_mutex);
				break;
			}

			switch (port->fp_topology) {
			case FC_TOP_PRIVATE_LOOP:
				newtop = "Private Loop";
				break;

			case FC_TOP_PUBLIC_LOOP:
				newtop = "Public Loop";
				break;

			case FC_TOP_PT_PT:
				newtop = "Point to Point";
				break;

			case FC_TOP_FABRIC:
				newtop = "Fabric";
				break;

			default:
				newtop = NULL;
				break;
			}

			if (oldtop && newtop && strcmp(oldtop, newtop)) {
				fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
				    "Change in FC Topology old = %s new = %s",
				    oldtop, newtop);
			}

			switch (port->fp_topology) {
			case FC_TOP_PRIVATE_LOOP: {
				int orphan = (old_top == FC_TOP_FABRIC ||
				    old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;

				mutex_exit(&port->fp_mutex);
				fp_loop_online(port, job, orphan);
				break;
			}

			case FC_TOP_PUBLIC_LOOP:
				/* FALLTHROUGH */
			case FC_TOP_FABRIC:
				fp_fabric_online(port, job);
				mutex_exit(&port->fp_mutex);
				break;

			case FC_TOP_PT_PT:
				fp_p2p_online(port, job);
				mutex_exit(&port->fp_mutex);
				break;

			default:
				if (--port->fp_statec_busy != 0) {
					/*
					 * Watch curiously at what the next
					 * state transition can do.
					 */
					mutex_exit(&port->fp_mutex);
					break;
				}

				FP_TRACE(FP_NHEAD2(9, 0),
				    "Topology Unknown, Offlining the port..");

				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
				port->fp_state = FC_STATE_OFFLINE;

				if (port->fp_offline_tid == NULL) {
					port->fp_offline_tid =
					    timeout(fp_offline_timeout,
					    (caddr_t)port, fp_offline_ticks);
				}
				mutex_exit(&port->fp_mutex);
				break;
			}

			mutex_enter(&port->fp_mutex);

			port->fp_task = port->fp_last_task;
			port->fp_last_task = FP_TASK_IDLE;

			mutex_exit(&port->fp_mutex);

			fctl_jobdone(job);
			break;
		}

		case JOB_PLOGI_GROUP: {
			/*
			 * PLOGI to a group of remote ports.  The job is
			 * handed off to fp_plogi_group(); no fctl_jobdone()
			 * is called here.
			 */
			mutex_exit(&port->fp_mutex);
			fp_plogi_group(port, job);
			break;
		}

		case JOB_UNSOL_REQUEST: {
			/*
			 * Process an unsolicited receive buffer.  The job
			 * is deallocated afterwards rather than completed
			 * via fctl_jobdone().
			 */
			mutex_exit(&port->fp_mutex);
			fp_handle_unsol_buf(port,
			    (fc_unsol_buf_t *)job->job_private, job);
			fctl_dealloc_job(job);
			break;
		}

		case JOB_NS_CMD: {
			/*
			 * Transport a name server request.  Command codes
			 * outside the NS_GA_NXT..NS_DA_ID range are rejected;
			 * registration commands and queries then take
			 * different paths below.
			 */
			fctl_ns_req_t *ns_cmd;

			mutex_exit(&port->fp_mutex);

			job->job_flags |= JOB_TYPE_FP_ASYNC;
			ns_cmd = (fctl_ns_req_t *)job->job_private;
			if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
			    ns_cmd->ns_cmd_code > NS_DA_ID) {
				job->job_result = FC_BADCMD;
				fctl_jobdone(job);
				break;
			}

			if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
				if (ns_cmd->ns_pd != NULL) {
					job->job_result = FC_BADOBJECT;
					fctl_jobdone(job);
					break;
				}

				job->job_counter = 1;

				rval = fp_ns_reg(port, ns_cmd->ns_pd,
				    ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);

				if (rval != FC_SUCCESS) {
					job->job_result = rval;
					fctl_jobdone(job);
				}
				break;
			}
			job->job_result = FC_SUCCESS;
			job->job_counter = 1;

			rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
			if (rval != FC_SUCCESS) {
				fctl_jobdone(job);
			}
			break;
		}

		case JOB_LINK_RESET: {
			/*
			 * Reset the link.  A zero WWN or a private-loop
			 * topology means resetting our own link via the FCA;
			 * on switch topologies a remote LIP is attempted
			 * through the fabric instead.
			 */
			la_wwn_t *pwwn;
			uint32_t topology;

			pwwn = (la_wwn_t *)job->job_private;
			ASSERT(pwwn != NULL);

			topology = port->fp_topology;
			mutex_exit(&port->fp_mutex);

			if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
			    topology == FC_TOP_PRIVATE_LOOP) {
				job->job_flags |= JOB_TYPE_FP_ASYNC;
				rval = port->fp_fca_tran->fca_reset(
				    port->fp_fca_handle, FC_FCA_LINK_RESET);
				job->job_result = rval;
				fp_jobdone(job);
			} else {
				ASSERT((job->job_flags &
				    JOB_TYPE_FP_ASYNC) == 0);

				if (FC_IS_TOP_SWITCH(topology)) {
					rval = fp_remote_lip(port, pwwn,
					    KM_SLEEP, job);
				} else {
					rval = FC_FAILURE;
				}
				if (rval != FC_SUCCESS) {
					job->job_result = rval;
				}
				fctl_jobdone(job);
			}
			break;
		}

		default:
			/* Unrecognized job code; fail it back to the caller */
			mutex_exit(&port->fp_mutex);
			job->job_result = FC_BADCMD;
			fctl_jobdone(job);
			break;
		}
	}
	/* NOTREACHED */
}
2892 
2893 
2894 /*
2895  * Perform FC port bring up initialization
2896  */
static int
fp_port_startup(fc_local_port_t *port, job_request_t *job)
{
	int		rval;
	uint32_t	state;
	uint32_t	src_id;
	fc_lilpmap_t	*lilp_map;

	/*
	 * Called and returns with fp_mutex held; the mutex is dropped and
	 * reacquired around the blocking calls below (job completion, LILP
	 * map retrieval, FCA port-manage, fabric login).
	 */
	ASSERT(MUTEX_HELD(&port->fp_mutex));
	ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);

	FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
	    " port=%p, job=%p", port, job);

	/* Start from a clean slate; topology is rediscovered below. */
	port->fp_topology = FC_TOP_UNKNOWN;
	port->fp_port_id.port_id = 0;
	state = FC_PORT_STATE_MASK(port->fp_state);

	if (state == FC_STATE_OFFLINE) {
		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
		job->job_result = FC_OFFLINE;
		mutex_exit(&port->fp_mutex);
		fctl_jobdone(job);
		mutex_enter(&port->fp_mutex);
		return (FC_OFFLINE);
	}

	if (state == FC_STATE_LOOP) {
		/* Loop port: fetch and validate the LILP map. */
		port->fp_port_type.port_type = FC_NS_PORT_NL;
		mutex_exit(&port->fp_mutex);

		lilp_map = &port->fp_lilp_map;
		if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
			job->job_result = FC_FAILURE;
			fctl_jobdone(job);

			FP_TRACE(FP_NHEAD1(9, rval),
			    "LILP map Invalid or not present");
			mutex_enter(&port->fp_mutex);
			return (FC_FAILURE);
		}

		if (lilp_map->lilp_length == 0) {
			job->job_result = FC_NO_MAP;
			fctl_jobdone(job);
			fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
			    "LILP map length zero");
			mutex_enter(&port->fp_mutex);
			return (FC_NO_MAP);
		}
		/* Our AL_PA is the low byte of lilp_myalpa. */
		src_id = lilp_map->lilp_myalpa & 0xFF;
	} else {
		fc_remote_port_t	*pd;
		fc_fca_pm_t		pm;
		fc_fca_p2p_info_t	p2p_info;
		int			pd_recepient;

		/*
		 * Get P2P remote port info if possible
		 */
		bzero((caddr_t)&pm, sizeof (pm));

		pm.pm_cmd_flags = FC_FCA_PM_READ;
		pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
		pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
		pm.pm_data_buf = (caddr_t)&p2p_info;

		rval = port->fp_fca_tran->fca_port_manage(
		    port->fp_fca_handle, &pm);

		if (rval == FC_SUCCESS) {
			/*
			 * The FCA knows its point-to-point peer; record the
			 * topology, create the remote port, and return early
			 * without any fabric login.  WWN comparison decides
			 * who initiates PLOGI.
			 */
			port->fp_port_id.port_id = p2p_info.fca_d_id;
			port->fp_port_type.port_type = FC_NS_PORT_N;
			port->fp_topology = FC_TOP_PT_PT;
			port->fp_total_devices = 1;
			pd_recepient = fctl_wwn_cmp(
			    &port->fp_service_params.nport_ww_name,
			    &p2p_info.pwwn) < 0 ?
			    PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
			mutex_exit(&port->fp_mutex);
			pd = fctl_create_remote_port(port,
			    &p2p_info.nwwn,
			    &p2p_info.pwwn,
			    p2p_info.d_id,
			    pd_recepient, KM_NOSLEEP);
			FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
			    " P2P port=%p pd=%p fp %x pd %x", port, pd,
			    port->fp_port_id.port_id, p2p_info.d_id);
			mutex_enter(&port->fp_mutex);
			return (FC_SUCCESS);
		}
		port->fp_port_type.port_type = FC_NS_PORT_N;
		mutex_exit(&port->fp_mutex);
		src_id = 0;
	}

	job->job_counter = 1;
	job->job_result = FC_SUCCESS;

	/* Attempt a fabric login (FLOGI) using the source id chosen above. */
	if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
	    KM_SLEEP)) != FC_SUCCESS) {
		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
		job->job_result = FC_FAILURE;
		fctl_jobdone(job);

		mutex_enter(&port->fp_mutex);
		if (port->fp_statec_busy <= 1) {
			mutex_exit(&port->fp_mutex);
			fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
			    "Couldn't transport FLOGI");
			mutex_enter(&port->fp_mutex);
		}
		return (FC_FAILURE);
	}

	fp_jobwait(job);

	mutex_enter(&port->fp_mutex);
	if (job->job_result == FC_SUCCESS) {
		/* On a switch topology, initialize the name server next. */
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			fp_ns_init(port, job, KM_SLEEP);
			mutex_enter(&port->fp_mutex);
		}
	} else {
		/*
		 * FLOGI failed; if we were in loop state, fall back to
		 * private loop using the AL_PA from the LILP map.
		 */
		if (state == FC_STATE_LOOP) {
			port->fp_topology = FC_TOP_PRIVATE_LOOP;
			port->fp_port_id.port_id =
			    port->fp_lilp_map.lilp_myalpa & 0xFF;
		}
	}

	FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
	    port, job);

	return (FC_SUCCESS);
}
3034 
3035 
3036 /*
3037  * Perform ULP invocations following FC port startup
3038  */
3039 /* ARGSUSED */
3040 static void
3041 fp_startup_done(opaque_t arg, uchar_t result)
3042 {
3043 	fc_local_port_t *port = arg;
3044 
3045 	fp_attach_ulps(port, FC_CMD_ATTACH);
3046 
3047 	FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3048 }
3049 
3050 
3051 /*
3052  * Perform ULP port attach
3053  */
3054 static void
3055 fp_ulp_port_attach(void *arg)
3056 {
3057 	fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3058 	fc_local_port_t	 *port = att->att_port;
3059 
3060 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3061 	    " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3062 
3063 	fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3064 
3065 	if (att->att_need_pm_idle == B_TRUE) {
3066 		fctl_idle_port(port);
3067 	}
3068 
3069 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3070 	    " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3071 
3072 	mutex_enter(&att->att_port->fp_mutex);
3073 	att->att_port->fp_ulp_attach = 0;
3074 
3075 	port->fp_task = port->fp_last_task;
3076 	port->fp_last_task = FP_TASK_IDLE;
3077 
3078 	cv_signal(&att->att_port->fp_attach_cv);
3079 
3080 	mutex_exit(&att->att_port->fp_mutex);
3081 
3082 	kmem_free(att, sizeof (fp_soft_attach_t));
3083 }
3084 
3085 /*
3086  * Entry point to funnel all requests down to FCAs
3087  */
static int
fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
{
	int rval;

	mutex_enter(&port->fp_mutex);

	/*
	 * Fail fast when multiple state changes are queued, or — for
	 * ULP-originated packets (cmd_ulp_pkt != NULL) — when any state
	 * change is pending or the port is offline.
	 */
	if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
	    (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
	    FC_STATE_OFFLINE))) {
		/*
		 * This means there is more than one state change
		 * at this point of time - Since they are processed
		 * serially, any processing of the current one should
		 * be failed, failed and move up in processing the next
		 */
		cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
		if (cmd->cmd_job) {
			/*
			 * A state change that is going to be invalidated
			 * by another one already in the port driver's queue
			 * need not go up to all ULPs. This will minimize
			 * needless processing and ripples in ULP modules
			 */
			cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
		}
		mutex_exit(&port->fp_mutex);
		return (FC_STATEC_BUSY);
	}

	if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
		cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
		mutex_exit(&port->fp_mutex);

		return (FC_OFFLINE);
	}
	mutex_exit(&port->fp_mutex);

	/*
	 * Hand the packet to the FCA.  On FC_TRAN_BUSY, queue it for a
	 * deferred retry via fp_retry_cmd(); on success, count it as an
	 * outstanding fp command.
	 */
	rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
	if (rval != FC_SUCCESS) {
		if (rval == FC_TRAN_BUSY) {
			cmd->cmd_retry_interval = fp_retry_delay;
			rval = fp_retry_cmd(&cmd->cmd_pkt);
			if (rval == FC_FAILURE) {
				cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
			}
		}
	} else {
		mutex_enter(&port->fp_mutex);
		port->fp_out_fpcmds++;
		mutex_exit(&port->fp_mutex);
	}

	return (rval);
}
3144 
3145 
3146 /*
 * Each time a timeout kicks in, walk the wait queue, decrement
 * the retry_interval; when the retry_interval becomes less than
3149  * or equal to zero, re-transport the command: If the re-transport
3150  * fails with BUSY, enqueue the command in the wait queue.
3151  *
3152  * In order to prevent looping forever because of commands enqueued
3153  * from within this function itself, save the current tail pointer
3154  * (in cur_tail) and exit the loop after serving this command.
3155  */
static void
fp_resendcmd(void *port_handle)
{
	int		rval;
	fc_local_port_t	*port;
	fp_cmd_t	*cmd;
	fp_cmd_t	*cur_tail;

	port = port_handle;

	/*
	 * Snapshot the current tail so commands re-enqueued from within
	 * this loop do not make us spin forever (see header comment).
	 */
	mutex_enter(&port->fp_mutex);
	cur_tail = port->fp_wait_tail;
	mutex_exit(&port->fp_mutex);

	while ((cmd = fp_deque_cmd(port)) != NULL) {
		cmd->cmd_retry_interval -= fp_retry_ticker;
		/*
		 * Check if we are detaching.
		 *
		 * NOTE(review): fp_soft_state is read here without
		 * fp_mutex held — presumably the detach flags are stable
		 * enough for this check; confirm against the driver's
		 * locking rules.
		 */
		if (port->fp_soft_state &
		    (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
			/* Detaching: fail the command back immediately. */
			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
			cmd->cmd_pkt.pkt_reason = 0;
			fp_iodone(cmd);
		} else if (cmd->cmd_retry_interval <= 0) {
			/* Retry interval expired; re-transport the command. */
			rval = cmd->cmd_transport(port->fp_fca_handle,
			    &cmd->cmd_pkt);

			if (rval != FC_SUCCESS) {
				if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
					/* Still busy: re-queue if retries remain. */
					if (--cmd->cmd_retry_count) {
						fp_enque_cmd(port, cmd);
						if (cmd == cur_tail) {
							break;
						}
						continue;
					}
					cmd->cmd_pkt.pkt_state =
					    FC_PKT_TRAN_BSY;
				} else {
					cmd->cmd_pkt.pkt_state =
					    FC_PKT_TRAN_ERROR;
				}
				cmd->cmd_pkt.pkt_reason = 0;
				fp_iodone(cmd);
			} else {
				/* Transport succeeded; count it outstanding. */
				mutex_enter(&port->fp_mutex);
				port->fp_out_fpcmds++;
				mutex_exit(&port->fp_mutex);
			}
		} else {
			/* Not yet due; put it back on the wait queue. */
			fp_enque_cmd(port, cmd);
		}

		if (cmd == cur_tail) {
			break;
		}
	}

	/*
	 * If anything is still waiting, re-arm the retry timeout;
	 * otherwise clear the timeout id.
	 */
	mutex_enter(&port->fp_mutex);
	if (port->fp_wait_head) {
		timeout_id_t tid;

		mutex_exit(&port->fp_mutex);
		tid = timeout(fp_resendcmd, (caddr_t)port,
		    fp_retry_ticks);
		mutex_enter(&port->fp_mutex);
		port->fp_wait_tid = tid;
	} else {
		port->fp_wait_tid = NULL;
	}
	mutex_exit(&port->fp_mutex);
}
3226 
3227 
3228 /*
3229  * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3230  *
3231  * Yes, as you can see below, cmd_retry_count is used here too.	 That means
3232  * the retries for BUSY are less if there were transport failures (transport
3233  * failure means fca_transport failure). The goal is not to exceed overall
3234  * retries set in the cmd_retry_count (whatever may be the reason for retry)
3235  *
3236  * Return Values:
3237  *	FC_SUCCESS
3238  *	FC_FAILURE
3239  */
3240 static int
3241 fp_retry_cmd(fc_packet_t *pkt)
3242 {
3243 	fp_cmd_t *cmd;
3244 
3245 	cmd = pkt->pkt_ulp_private;
3246 
3247 	if (--cmd->cmd_retry_count) {
3248 		fp_enque_cmd(cmd->cmd_port, cmd);
3249 		return (FC_SUCCESS);
3250 	} else {
3251 		return (FC_FAILURE);
3252 	}
3253 }
3254 
3255 
3256 /*
3257  * Queue up FC packet for deferred retry
3258  */
static void
fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
{
	timeout_id_t tid;

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

#ifdef	DEBUG
	fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
	    "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
#endif

	mutex_enter(&port->fp_mutex);
	if (port->fp_wait_tail) {
		/* Non-empty queue: append at the tail */
		port->fp_wait_tail->cmd_next = cmd;
		port->fp_wait_tail = cmd;
	} else {
		/* Empty queue: cmd becomes both head and tail */
		ASSERT(port->fp_wait_head == NULL);
		port->fp_wait_head = port->fp_wait_tail = cmd;
		if (port->fp_wait_tid == NULL) {
			/*
			 * No retry timeout is currently armed; arm one.
			 * fp_mutex is dropped across the timeout() call
			 * and re-taken to record the returned id.
			 */
			mutex_exit(&port->fp_mutex);
			tid = timeout(fp_resendcmd, (caddr_t)port,
			    fp_retry_ticks);
			mutex_enter(&port->fp_mutex);
			port->fp_wait_tid = tid;
		}
	}
	mutex_exit(&port->fp_mutex);
}
3288 
3289 
3290 /*
3291  * Handle all RJT codes
3292  */
static int
fp_handle_reject(fc_packet_t *pkt)
{
	int		rval = FC_FAILURE;
	uchar_t		next_class;
	fp_cmd_t	*cmd;
	/*
	 * NOTE(review): 'port' looks unused, but the FP_TRACE/FP_NHEAD1
	 * macros below presumably expand to reference it — confirm
	 * against fp.h before removing.
	 */
	fc_local_port_t *port;

	cmd = pkt->pkt_ulp_private;
	port = cmd->cmd_port;

	switch (pkt->pkt_state) {
	case FC_PKT_FABRIC_RJT:
	case FC_PKT_NPORT_RJT:
		/*
		 * Class of service not supported: step to the next class
		 * the FCA offers and re-send the command in that class.
		 */
		if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
			next_class = fp_get_nextclass(cmd->cmd_port,
			    FC_TRAN_CLASS(pkt->pkt_tran_flags));

			if (next_class == FC_TRAN_CLASS_INVALID) {
				return (rval);
			}
			pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
			pkt->pkt_tran_type = FC_PKT_EXCHANGE;

			rval = fp_sendcmd(cmd->cmd_port, cmd,
			    cmd->cmd_port->fp_fca_handle);

			if (rval != FC_SUCCESS) {
				pkt->pkt_state = FC_PKT_TRAN_ERROR;
			}
		}
		break;

	case FC_PKT_LS_RJT:
	case FC_PKT_BA_RJT:
		/* Logical error/busy from the link service: retry later */
		if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
		    (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
			cmd->cmd_retry_interval = fp_retry_delay;
			rval = fp_retry_cmd(pkt);
		}
		break;

	case FC_PKT_FS_RJT:
		/* Fabric services busy or temporarily unable: retry later */
		if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) ||
		    ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) &&
		    (pkt->pkt_expln == 0x00))) {
			cmd->cmd_retry_interval = fp_retry_delay;
			rval = fp_retry_cmd(pkt);
		}
		break;

	case FC_PKT_LOCAL_RJT:
		/* Local queue full: retry later */
		if (pkt->pkt_reason == FC_REASON_QFULL) {
			cmd->cmd_retry_interval = fp_retry_delay;
			rval = fp_retry_cmd(pkt);
		}
		break;

	default:
		FP_TRACE(FP_NHEAD1(1, 0),
		    "fp_handle_reject(): Invalid pkt_state");
		break;
	}

	return (rval);
}
3359 
3360 
3361 /*
3362  * Return the next class of service supported by the FCA
3363  */
3364 static uchar_t
3365 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3366 {
3367 	uchar_t next_class;
3368 
3369 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3370 
3371 	switch (cur_class) {
3372 	case FC_TRAN_CLASS_INVALID:
3373 		if (port->fp_cos & FC_NS_CLASS1) {
3374 			next_class = FC_TRAN_CLASS1;
3375 			break;
3376 		}
3377 		/* FALLTHROUGH */
3378 
3379 	case FC_TRAN_CLASS1:
3380 		if (port->fp_cos & FC_NS_CLASS2) {
3381 			next_class = FC_TRAN_CLASS2;
3382 			break;
3383 		}
3384 		/* FALLTHROUGH */
3385 
3386 	case FC_TRAN_CLASS2:
3387 		if (port->fp_cos & FC_NS_CLASS3) {
3388 			next_class = FC_TRAN_CLASS3;
3389 			break;
3390 		}
3391 		/* FALLTHROUGH */
3392 
3393 	case FC_TRAN_CLASS3:
3394 	default:
3395 		next_class = FC_TRAN_CLASS_INVALID;
3396 		break;
3397 	}
3398 
3399 	return (next_class);
3400 }
3401 
3402 
3403 /*
3404  * Determine if a class of service is supported by the FCA
3405  */
3406 static int
3407 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3408 {
3409 	int rval;
3410 
3411 	switch (tran_class) {
3412 	case FC_TRAN_CLASS1:
3413 		if (cos & FC_NS_CLASS1) {
3414 			rval = FC_SUCCESS;
3415 		} else {
3416 			rval = FC_FAILURE;
3417 		}
3418 		break;
3419 
3420 	case FC_TRAN_CLASS2:
3421 		if (cos & FC_NS_CLASS2) {
3422 			rval = FC_SUCCESS;
3423 		} else {
3424 			rval = FC_FAILURE;
3425 		}
3426 		break;
3427 
3428 	case FC_TRAN_CLASS3:
3429 		if (cos & FC_NS_CLASS3) {
3430 			rval = FC_SUCCESS;
3431 		} else {
3432 			rval = FC_FAILURE;
3433 		}
3434 		break;
3435 
3436 	default:
3437 		rval = FC_FAILURE;
3438 		break;
3439 	}
3440 
3441 	return (rval);
3442 }
3443 
3444 
3445 /*
3446  * Dequeue FC packet for retry
3447  */
3448 static fp_cmd_t *
3449 fp_deque_cmd(fc_local_port_t *port)
3450 {
3451 	fp_cmd_t *cmd;
3452 
3453 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3454 
3455 	mutex_enter(&port->fp_mutex);
3456 
3457 	if (port->fp_wait_head == NULL) {
3458 		/*
3459 		 * To avoid races, NULL the fp_wait_tid as
3460 		 * we are about to exit the timeout thread.
3461 		 */
3462 		port->fp_wait_tid = NULL;
3463 		mutex_exit(&port->fp_mutex);
3464 		return (NULL);
3465 	}
3466 
3467 	cmd = port->fp_wait_head;
3468 	port->fp_wait_head = cmd->cmd_next;
3469 	cmd->cmd_next = NULL;
3470 
3471 	if (port->fp_wait_head == NULL) {
3472 		port->fp_wait_tail = NULL;
3473 	}
3474 	mutex_exit(&port->fp_mutex);
3475 
3476 	return (cmd);
3477 }
3478 
3479 
3480 /*
3481  * Wait for job completion
3482  */
static void
fp_jobwait(job_request_t *job)
{
	/* Block until the job's port semaphore is posted on completion */
	sema_p(&job->job_port_sema);
}
3488 
3489 
3490 /*
3491  * Convert FC packet state to FC errno
3492  */
3493 int
3494 fp_state_to_rval(uchar_t state)
3495 {
3496 	int count;
3497 
3498 	for (count = 0; count < sizeof (fp_xlat) /
3499 	    sizeof (fp_xlat[0]); count++) {
3500 		if (fp_xlat[count].xlat_state == state) {
3501 			return (fp_xlat[count].xlat_rval);
3502 		}
3503 	}
3504 
3505 	return (FC_FAILURE);
3506 }
3507 
3508 
3509 /*
3510  * For Synchronous I/O requests, the caller is
3511  * expected to do fctl_jobdone(if necessary)
3512  *
3513  * We want to preserve at least one failure in the
3514  * job_result if it happens.
3515  *
3516  */
3517 static void
3518 fp_iodone(fp_cmd_t *cmd)
3519 {
3520 	fc_packet_t		*ulp_pkt = cmd->cmd_ulp_pkt;
3521 	job_request_t		*job = cmd->cmd_job;
3522 	fc_remote_port_t	*pd = cmd->cmd_pkt.pkt_pd;
3523 
3524 	ASSERT(job != NULL);
3525 	ASSERT(cmd->cmd_port != NULL);
3526 	ASSERT(&cmd->cmd_pkt != NULL);
3527 
3528 	mutex_enter(&job->job_mutex);
3529 	if (job->job_result == FC_SUCCESS) {
3530 		job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3531 	}
3532 	mutex_exit(&job->job_mutex);
3533 
3534 	if (pd) {
3535 		mutex_enter(&pd->pd_mutex);
3536 		pd->pd_flags = PD_IDLE;
3537 		mutex_exit(&pd->pd_mutex);
3538 	}
3539 
3540 	if (ulp_pkt) {
3541 		if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3542 		    FP_IS_PKT_ERROR(ulp_pkt)) {
3543 			fc_local_port_t		*port;
3544 			fc_remote_node_t	*node;
3545 
3546 			port = cmd->cmd_port;
3547 
3548 			mutex_enter(&pd->pd_mutex);
3549 			pd->pd_state = PORT_DEVICE_INVALID;
3550 			pd->pd_ref_count--;
3551 			node = pd->pd_remote_nodep;
3552 			mutex_exit(&pd->pd_mutex);
3553 
3554 			ASSERT(node != NULL);
3555 			ASSERT(port != NULL);
3556 
3557 			if (fctl_destroy_remote_port(port, pd) == 0) {
3558 				fctl_destroy_remote_node(node);
3559 			}
3560 
3561 			ulp_pkt->pkt_pd = NULL;
3562 		}
3563 
3564 		ulp_pkt->pkt_comp(ulp_pkt);
3565 	}
3566 
3567 	fp_free_pkt(cmd);
3568 	fp_jobdone(job);
3569 }
3570 
3571 
3572 /*
3573  * Job completion handler
3574  */
3575 static void
3576 fp_jobdone(job_request_t *job)
3577 {
3578 	mutex_enter(&job->job_mutex);
3579 	ASSERT(job->