1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * NOT a DDI compliant Sun Fibre Channel port driver(fp)
26  *
27  */
28 
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/param.h>
32 #include <sys/errno.h>
33 #include <sys/uio.h>
34 #include <sys/buf.h>
35 #include <sys/modctl.h>
36 #include <sys/open.h>
37 #include <sys/file.h>
38 #include <sys/kmem.h>
39 #include <sys/poll.h>
40 #include <sys/conf.h>
41 #include <sys/thread.h>
42 #include <sys/var.h>
43 #include <sys/cmn_err.h>
44 #include <sys/stat.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/promif.h>
48 #include <sys/nvpair.h>
49 #include <sys/byteorder.h>
50 #include <sys/scsi/scsi.h>
51 #include <sys/fibre-channel/fc.h>
52 #include <sys/fibre-channel/impl/fc_ulpif.h>
53 #include <sys/fibre-channel/impl/fc_fcaif.h>
54 #include <sys/fibre-channel/impl/fctl_private.h>
55 #include <sys/fibre-channel/impl/fc_portif.h>
56 #include <sys/fibre-channel/impl/fp.h>
57 
58 /* These are defined in fctl.c! */
59 extern int did_table_size;
60 extern int pwwn_table_size;
61 
/*
 * Character device entry points for the fp control node.  fp exposes a
 * devctl-style node only: open, close and ioctl are implemented; all
 * data-path entry points are nodev.
 */
static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
82 
/*
 * Device operations vector: attach/detach/getinfo/power plus the
 * character-device ops above.  No bus_ops — fp is a leaf of its FCA
 * parent from the devinfo tree's point of view.
 */
static struct dev_ops fp_ops = {
	DEVO_REV,	/* build revision */
	0,		/* reference count */
	fp_getinfo,	/* getinfo */
	nulldev,	/* identify - Obsoleted */
	nulldev,	/* probe */
	fp_attach,	/* attach */
	fp_detach,	/* detach */
	nodev,		/* reset */
	&fp_cb_ops,	/* cb_ops */
	NULL,		/* bus_ops */
	fp_power	/* power */
};
96 
/* Driver version string advertised through the module linkage below */
#define	FP_VERSION		"1.98"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

/* Exported so other code (e.g. fctl) can report the fp version */
char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	/* Rev of the loadable modules system */
	&modldrv,	/* NULL terminated list of */
	NULL		/* Linkage structures */
};
113 
114 
115 
/*
 * Name Server object registration commands (FC-GS CT requests).
 * Presumably issued in array order when fp registers this port's
 * attributes with the fabric name server — confirm against the
 * consumer of this table elsewhere in the driver.
 */
static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};
125 
/*
 * Translation table from low-level packet completion states (FC_PKT_*)
 * to the FC_* return codes handed back to callers/ULPs.
 */
struct fp_xlat {
	uchar_t	xlat_state;	/* FC_PKT_* completion state */
	int	xlat_rval;	/* corresponding FC_* return value */
} fp_xlat [] = {
	{ FC_PKT_SUCCESS,	FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP,	FC_FAILURE },
	{ FC_PKT_LOCAL_RJT,	FC_FAILURE },
	{ FC_PKT_NPORT_RJT,	FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT,	FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY,	FC_PBUSY },
	{ FC_PKT_FABRIC_BSY,	FC_FBUSY },
	{ FC_PKT_LS_RJT,	FC_FAILURE },
	{ FC_PKT_BA_RJT,	FC_FAILURE },
	{ FC_PKT_TIMEOUT,	FC_FAILURE },
	{ FC_PKT_TRAN_ERROR,	FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE,	FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE,	FC_OFFLINE }
};
146 
/*
 * The 126 valid arbitrated-loop physical addresses (AL_PAs) for
 * NL_Ports, per the FC-AL standard's neutral-disparity address table.
 */
static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};
163 
/*
 * Per-ioctl permission table: the open mode required to issue each
 * FCIO command.  FP_OPEN entries are allowed on any open of the node;
 * FP_EXCL entries presumably require an exclusive (FEXCL) open —
 * confirm against the permission check in fp_fciocmd().
 */
static struct fp_perms {
	uint16_t	fp_ioctl_cmd;	/* FCIO_* command */
	uchar_t		fp_open_flag;	/* required open mode */
} fp_perm_list [] = {
	{ FCIO_GET_NUM_DEVS,		FP_OPEN },
	{ FCIO_GET_DEV_LIST,		FP_OPEN },
	{ FCIO_GET_SYM_PNAME,		FP_OPEN },
	{ FCIO_GET_SYM_NNAME,		FP_OPEN },
	{ FCIO_SET_SYM_PNAME,		FP_EXCL },
	{ FCIO_SET_SYM_NNAME,		FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS,		FP_OPEN },
	{ FCIO_DEV_LOGIN,		FP_EXCL },
	{ FCIO_DEV_LOGOUT,		FP_EXCL },
	{ FCIO_GET_STATE,		FP_OPEN },
	{ FCIO_DEV_REMOVE,		FP_EXCL },
	{ FCIO_GET_FCODE_REV,		FP_OPEN },
	{ FCIO_GET_FW_REV,		FP_OPEN },
	{ FCIO_GET_DUMP_SIZE,		FP_OPEN },
	{ FCIO_FORCE_DUMP,		FP_EXCL },
	{ FCIO_GET_DUMP,		FP_OPEN },
	{ FCIO_GET_TOPOLOGY,		FP_OPEN },
	{ FCIO_RESET_LINK,		FP_EXCL },
	{ FCIO_RESET_HARD,		FP_EXCL },
	{ FCIO_RESET_HARD_CORE,		FP_EXCL },
	{ FCIO_DIAG,			FP_OPEN },
	{ FCIO_NS,			FP_EXCL },
	{ FCIO_DOWNLOAD_FW,		FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE,		FP_EXCL },
	{ FCIO_LINK_STATUS,		FP_OPEN },
	{ FCIO_GET_HOST_PARAMS,		FP_OPEN },
	{ FCIO_GET_NODE_ID,		FP_OPEN },
	{ FCIO_SET_NODE_ID,		FP_EXCL },
	{ FCIO_SEND_NODE_ID,		FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT, FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT, FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
};
210 
/*
 * Power-management component description, in the format expected for
 * the "pm-components" devinfo property: component name followed by
 * one "level=label" string per power level.
 */
static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};
216 
217 
#ifdef	_LITTLE_ENDIAN
/*
 * MAKE_BE_32: byte-swap every 32-bit word of the object *(x) in place,
 * converting it to Fibre Channel (big-endian) wire order.  On a
 * big-endian host this is a no-op.  Note that sizeof (*(x)) is assumed
 * to be a multiple of sizeof (uint32_t); any trailing bytes would be
 * left untouched.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement; the previous bare-block form would not compose safely
 * with unbraced if/else constructs (the trailing semicolon at the
 * call site would terminate the if before the else).
 */
#define	MAKE_BE_32(x)	do {						\
		uint32_t	*ptr1, i;				\
		ptr1 = (uint32_t *)(x);					\
		for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
			*ptr1 = BE_32(*ptr1);				\
			ptr1++;						\
		}							\
	} while (0)
#else
#define	MAKE_BE_32(x)
#endif
230 
/* Message classes emitted by fp; default: warnings and fatal messages */
static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);

/* Option bits copied into each port's fp_options at attach time */
static uint32_t fp_options = 0;

/* Seconds fp_detach() waits for the job queue to drain (one per loop) */
static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances.  Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

/* Tick intervals (clock ticks), computed from the tickers in fp_attach() */
static clock_t	fp_retry_ticks;
static clock_t	fp_offline_ticks;

/* Retry timer granularity in seconds; set to 1 in fp_attach() */
static int fp_retry_ticker;

/* Defaults for the unsolicited buffer pool passed to fc_ulp_uballoc() */
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

/* Trace log queue, allocated in _init() and freed in _fini() */
static int		fp_log_size = FP_LOG_SIZE;
static int		fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t	*fp_logq = NULL;

/* Forward declarations for local helpers (fp_get_adapter_paths is public) */
int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);
262 
263 
264 /*
265  * Perform global initialization
266  */
267 int
268 _init(void)
269 {
270 	int ret;
271 
272 	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
273 	    sizeof (struct fc_local_port), 8)) != 0) {
274 		return (ret);
275 	}
276 
277 	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
278 		ddi_soft_state_fini(&fp_driver_softstate);
279 		return (ret);
280 	}
281 
282 	fp_logq = fc_trace_alloc_logq(fp_log_size);
283 
284 	if ((ret = mod_install(&modlinkage)) != 0) {
285 		fc_trace_free_logq(fp_logq);
286 		ddi_soft_state_fini(&fp_driver_softstate);
287 		scsi_hba_fini(&modlinkage);
288 	}
289 
290 	return (ret);
291 }
292 
293 
294 /*
295  * Prepare for driver unload
296  */
297 int
298 _fini(void)
299 {
300 	int ret;
301 
302 	if ((ret = mod_remove(&modlinkage)) == 0) {
303 		fc_trace_free_logq(fp_logq);
304 		ddi_soft_state_fini(&fp_driver_softstate);
305 		scsi_hba_fini(&modlinkage);
306 	}
307 
308 	return (ret);
309 }
310 
311 
312 /*
313  * Request mod_info() to handle all cases
314  */
315 int
316 _info(struct modinfo *modinfo)
317 {
318 	return (mod_info(&modlinkage, modinfo));
319 }
320 
321 
322 /*
323  * fp_attach:
324  *
325  * The respective cmd handlers take care of performing
326  * ULP related invocations
327  */
328 static int
329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
330 {
331 	int rval;
332 
333 	/*
334 	 * We check the value of fp_offline_ticker at this
335 	 * point. The variable is global for the driver and
336 	 * not specific to an instance.
337 	 *
338 	 * If there is no user-defined value found in /etc/system
339 	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
340 	 * The minimum setting for this offline timeout according
341 	 * to the FC-FS2 standard (Fibre Channel Framing and
342 	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
343 	 *
344 	 * We do not recommend setting the value to less than 10
345 	 * seconds (RA_TOV) or more than 90 seconds. If this
346 	 * variable is greater than 90 seconds then drivers above
347 	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
348 	 */
349 
350 	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
351 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
352 	    FP_OFFLINE_TICKER);
353 
354 	if ((fp_offline_ticker < 10) ||
355 	    (fp_offline_ticker > 90)) {
356 		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
357 		    "%d second(s). This is outside the "
358 		    "recommended range of 10..90 seconds",
359 		    fp_offline_ticker);
360 	}
361 
362 	/*
363 	 * Tick every second when there are commands to retry.
364 	 * It should tick at the least granular value of pkt_timeout
365 	 * (which is one second)
366 	 */
367 	fp_retry_ticker = 1;
368 
369 	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
370 	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
371 
372 	switch (cmd) {
373 	case DDI_ATTACH:
374 		rval = fp_attach_handler(dip);
375 		break;
376 
377 	case DDI_RESUME:
378 		rval = fp_resume_handler(dip);
379 		break;
380 
381 	default:
382 		rval = DDI_FAILURE;
383 		break;
384 	}
385 	return (rval);
386 }
387 
388 
389 /*
390  * fp_detach:
391  *
392  * If a ULP fails to handle cmd request converse of
393  * cmd is invoked for ULPs that previously succeeded
394  * cmd request.
395  */
396 static int
397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
398 {
399 	int			rval = DDI_FAILURE;
400 	fc_local_port_t		*port;
401 	fc_attach_cmd_t		converse;
402 	uint8_t			cnt;
403 
404 	if ((port = ddi_get_soft_state(fp_driver_softstate,
405 	    ddi_get_instance(dip))) == NULL) {
406 		return (DDI_FAILURE);
407 	}
408 
409 	mutex_enter(&port->fp_mutex);
410 
411 	if (port->fp_ulp_attach) {
412 		mutex_exit(&port->fp_mutex);
413 		return (DDI_FAILURE);
414 	}
415 
416 	switch (cmd) {
417 	case DDI_DETACH:
418 		if (port->fp_task != FP_TASK_IDLE) {
419 			mutex_exit(&port->fp_mutex);
420 			return (DDI_FAILURE);
421 		}
422 
423 		/* Let's attempt to quit the job handler gracefully */
424 		port->fp_soft_state |= FP_DETACH_INPROGRESS;
425 
426 		mutex_exit(&port->fp_mutex);
427 		converse = FC_CMD_ATTACH;
428 		if (fctl_detach_ulps(port, FC_CMD_DETACH,
429 		    &modlinkage) != FC_SUCCESS) {
430 			mutex_enter(&port->fp_mutex);
431 			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
432 			mutex_exit(&port->fp_mutex);
433 			rval = DDI_FAILURE;
434 			break;
435 		}
436 
437 		mutex_enter(&port->fp_mutex);
438 		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
439 		    cnt++) {
440 			mutex_exit(&port->fp_mutex);
441 			delay(drv_usectohz(1000000));
442 			mutex_enter(&port->fp_mutex);
443 		}
444 
445 		if (port->fp_job_head) {
446 			mutex_exit(&port->fp_mutex);
447 			rval = DDI_FAILURE;
448 			break;
449 		}
450 		mutex_exit(&port->fp_mutex);
451 
452 		rval = fp_detach_handler(port);
453 		break;
454 
455 	case DDI_SUSPEND:
456 		mutex_exit(&port->fp_mutex);
457 		converse = FC_CMD_RESUME;
458 		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
459 		    &modlinkage) != FC_SUCCESS) {
460 			rval = DDI_FAILURE;
461 			break;
462 		}
463 		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
464 			(void) callb_generic_cpr(&port->fp_cpr_info,
465 			    CB_CODE_CPR_RESUME);
466 		}
467 		break;
468 
469 	default:
470 		mutex_exit(&port->fp_mutex);
471 		break;
472 	}
473 
474 	/*
475 	 * Use softint to perform reattach.  Mark fp_ulp_attach so we
476 	 * don't attempt to do this repeatedly on behalf of some persistent
477 	 * caller.
478 	 */
479 	if (rval != DDI_SUCCESS) {
480 		mutex_enter(&port->fp_mutex);
481 		port->fp_ulp_attach = 1;
482 
483 		/*
484 		 * If the port is in the low power mode then there is
485 		 * possibility that fca too could be in low power mode.
486 		 * Try to raise the power before calling attach ulps.
487 		 */
488 
489 		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
490 		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
491 			mutex_exit(&port->fp_mutex);
492 			(void) pm_raise_power(port->fp_port_dip,
493 			    FP_PM_COMPONENT, FP_PM_PORT_UP);
494 		} else {
495 			mutex_exit(&port->fp_mutex);
496 		}
497 
498 
499 		fp_attach_ulps(port, converse);
500 
501 		mutex_enter(&port->fp_mutex);
502 		while (port->fp_ulp_attach) {
503 			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
504 		}
505 
506 		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
507 
508 		/*
509 		 * Mark state as detach failed so asynchronous ULP attach
510 		 * events (downstream, not the ones we're initiating with
511 		 * the call to fp_attach_ulps) are not honored.	 We're
512 		 * really still in pending detach.
513 		 */
514 		port->fp_soft_state |= FP_DETACH_FAILED;
515 
516 		mutex_exit(&port->fp_mutex);
517 	}
518 
519 	return (rval);
520 }
521 
522 
523 /*
524  * fp_getinfo:
525  *   Given the device number, return either the
526  *   dev_info_t pointer or the instance number.
527  */
528 
529 /* ARGSUSED */
530 static int
531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
532 {
533 	int		rval;
534 	minor_t		instance;
535 	fc_local_port_t *port;
536 
537 	rval = DDI_SUCCESS;
538 	instance = getminor((dev_t)arg);
539 
540 	switch (cmd) {
541 	case DDI_INFO_DEVT2DEVINFO:
542 		if ((port = ddi_get_soft_state(fp_driver_softstate,
543 		    instance)) == NULL) {
544 			rval = DDI_FAILURE;
545 			break;
546 		}
547 		*result = (void *)port->fp_port_dip;
548 		break;
549 
550 	case DDI_INFO_DEVT2INSTANCE:
551 		*result = (void *)(uintptr_t)instance;
552 		break;
553 
554 	default:
555 		rval = DDI_FAILURE;
556 		break;
557 	}
558 
559 	return (rval);
560 }
561 
562 
563 /*
564  * Entry point for power up and power down request from kernel
565  */
/*
 * fp_power: entry point for power up and power down requests from the
 * kernel PM framework.
 *
 * comp must be FP_PM_COMPONENT; level is FP_PM_PORT_UP or
 * FP_PM_PORT_DOWN.  Returns DDI_SUCCESS/DDI_FAILURE.  All soft-state
 * flag and fp_pm_level transitions happen under fp_mutex.
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			/* Raise optimistically; back out on failure */
			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			/* Already powered; just record the level */
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * PM framework goofed up.  We don't have any PM
			 * components.  Let's never go down.
			 */
			mutex_exit(&port->fp_mutex);
			break;

		}

		if (port->fp_ulp_attach) {
			/* We shouldn't let the power go down */
			mutex_exit(&port->fp_mutex);
			break;
		}

		/*
		 * Not a whole lot to do if we are detaching
		 */
		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
			port->fp_pm_level = FP_PM_PORT_DOWN;
			mutex_exit(&port->fp_mutex);
			rval = DDI_SUCCESS;
			break;
		}

		/* Only power down when no one holds the port busy */
		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
			port->fp_pm_level = FP_PM_PORT_DOWN;

			rval = fp_power_down(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_UP;
				ASSERT(!(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN));
			} else {
				ASSERT(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN);
			}
		}
		mutex_exit(&port->fp_mutex);
		break;

	default:
		break;
	}

	return (rval);
}
660 
661 
662 /*
663  * Open FC port devctl node
664  */
665 static int
666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
667 {
668 	int		instance;
669 	fc_local_port_t *port;
670 
671 	if (otype != OTYP_CHR) {
672 		return (EINVAL);
673 	}
674 
675 	/*
676 	 * This is not a toy to play with. Allow only powerful
677 	 * users (hopefully knowledgeable) to access the port
678 	 * (A hacker potentially could download a sick binary
679 	 * file into FCA)
680 	 */
681 	if (drv_priv(credp)) {
682 		return (EPERM);
683 	}
684 
685 	instance = (int)getminor(*devp);
686 
687 	port = ddi_get_soft_state(fp_driver_softstate, instance);
688 	if (port == NULL) {
689 		return (ENXIO);
690 	}
691 
692 	mutex_enter(&port->fp_mutex);
693 	if (port->fp_flag & FP_EXCL) {
694 		/*
695 		 * It is already open for exclusive access.
696 		 * So shut the door on this caller.
697 		 */
698 		mutex_exit(&port->fp_mutex);
699 		return (EBUSY);
700 	}
701 
702 	if (flag & FEXCL) {
703 		if (port->fp_flag & FP_OPEN) {
704 			/*
705 			 * Exclusive operation not possible
706 			 * as it is already opened
707 			 */
708 			mutex_exit(&port->fp_mutex);
709 			return (EBUSY);
710 		}
711 		port->fp_flag |= FP_EXCL;
712 	}
713 	port->fp_flag |= FP_OPEN;
714 	mutex_exit(&port->fp_mutex);
715 
716 	return (0);
717 }
718 
719 
720 /*
721  * The driver close entry point is called on the last close()
722  * of a device. So it is perfectly alright to just clobber the
723  * open flag and reset it to idle (instead of having to reset
724  * each flag bits). For any confusion, check out close(9E).
725  */
726 
727 /* ARGSUSED */
728 static int
729 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
730 {
731 	int		instance;
732 	fc_local_port_t *port;
733 
734 	if (otype != OTYP_CHR) {
735 		return (EINVAL);
736 	}
737 
738 	instance = (int)getminor(dev);
739 
740 	port = ddi_get_soft_state(fp_driver_softstate, instance);
741 	if (port == NULL) {
742 		return (ENXIO);
743 	}
744 
745 	mutex_enter(&port->fp_mutex);
746 	if ((port->fp_flag & FP_OPEN) == 0) {
747 		mutex_exit(&port->fp_mutex);
748 		return (ENODEV);
749 	}
750 	port->fp_flag = FP_IDLE;
751 	mutex_exit(&port->fp_mutex);
752 
753 	return (0);
754 }
755 
756 /*
757  * Handle IOCTL requests
758  */
759 
760 /* ARGSUSED */
761 static int
762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
763 {
764 	int		instance;
765 	int		ret = 0;
766 	fcio_t		fcio;
767 	fc_local_port_t *port;
768 
769 	instance = (int)getminor(dev);
770 
771 	port = ddi_get_soft_state(fp_driver_softstate, instance);
772 	if (port == NULL) {
773 		return (ENXIO);
774 	}
775 
776 	mutex_enter(&port->fp_mutex);
777 	if ((port->fp_flag & FP_OPEN) == 0) {
778 		mutex_exit(&port->fp_mutex);
779 		return (ENXIO);
780 	}
781 
782 	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
783 		mutex_exit(&port->fp_mutex);
784 		return (ENXIO);
785 	}
786 
787 	mutex_exit(&port->fp_mutex);
788 
789 	/* this will raise power if necessary */
790 	ret = fctl_busy_port(port);
791 	if (ret != 0) {
792 		return (ret);
793 	}
794 
795 	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);
796 
797 
798 	switch (cmd) {
799 	case FCIO_CMD: {
800 #ifdef	_MULTI_DATAMODEL
801 		switch (ddi_model_convert_from(mode & FMODELS)) {
802 		case DDI_MODEL_ILP32: {
803 			struct fcio32 fcio32;
804 
805 			if (ddi_copyin((void *)data, (void *)&fcio32,
806 			    sizeof (struct fcio32), mode)) {
807 				ret = EFAULT;
808 				break;
809 			}
810 			fcio.fcio_xfer = fcio32.fcio_xfer;
811 			fcio.fcio_cmd = fcio32.fcio_cmd;
812 			fcio.fcio_flags = fcio32.fcio_flags;
813 			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
814 			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
815 			fcio.fcio_ibuf =
816 			    (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
817 			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
818 			fcio.fcio_obuf =
819 			    (caddr_t)(uintptr_t)fcio32.fcio_obuf;
820 			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
821 			fcio.fcio_abuf =
822 			    (caddr_t)(uintptr_t)fcio32.fcio_abuf;
823 			fcio.fcio_errno = fcio32.fcio_errno;
824 			break;
825 		}
826 
827 		case DDI_MODEL_NONE:
828 			if (ddi_copyin((void *)data, (void *)&fcio,
829 			    sizeof (fcio_t), mode)) {
830 				ret = EFAULT;
831 			}
832 			break;
833 		}
834 #else	/* _MULTI_DATAMODEL */
835 		if (ddi_copyin((void *)data, (void *)&fcio,
836 		    sizeof (fcio_t), mode)) {
837 			ret = EFAULT;
838 			break;
839 		}
840 #endif	/* _MULTI_DATAMODEL */
841 		if (!ret) {
842 			ret = fp_fciocmd(port, data, mode, &fcio);
843 		}
844 		break;
845 	}
846 
847 	default:
848 		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
849 		    mode, credp, rval);
850 	}
851 
852 	fctl_idle_port(port);
853 
854 	return (ret);
855 }
856 
857 
858 /*
859  * Init Symbolic Port Name and Node Name
860  * LV will try to get symbolic names from FCA driver
861  * and register these to name server,
862  * if LV fails to get these,
863  * LV will register its default symbolic names to name server.
864  * The Default symbolic node name format is :
865  *	<hostname>:<hba driver name>(instance)
866  * The Default symbolic port name format is :
867  *	<fp path name>
868  */
/*
 * Init Symbolic Port Name and Node Name
 * LV will try to get symbolic names from FCA driver
 * and register these to name server,
 * if LV fails to get these,
 * LV will register its default symbolic names to name server.
 * The Default symbolic node name format is :
 *	<hostname>:<hba driver name>(instance)
 * The Default symbolic port name format is :
 *	<fp path name>
 *
 * NOTE(review): the strncpy() calls below can leave fp_sym_node_name /
 * fp_sym_port_name without a NUL terminator when the name is clamped
 * to FCHBA_SYMB_NAME_LEN; this appears intentional since the lengths
 * are tracked separately in fp_sym_*_namelen — confirm all consumers
 * honor the length fields rather than treating these as C strings.
 */
static void
fp_init_symbolic_names(fc_local_port_t *port)
{
	const char *vendorname = ddi_driver_name(port->fp_fca_dip);
	char *sym_name;
	char fcaname[50] = {0};
	int hostnlen, fcanlen;

	if (port->fp_sym_node_namelen == 0) {
		/* Build "<hostname>:<driver><instance>" as the node name */
		hostnlen = strlen(utsname.nodename);
		(void) snprintf(fcaname, sizeof (fcaname),
		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
		fcanlen = strlen(fcaname);

		/* +2: one for the ':' separator, one for the NUL */
		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
		port->fp_sym_node_namelen = strlen(sym_name);
		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_node_name, sym_name,
		    port->fp_sym_node_namelen);
		kmem_free(sym_name, hostnlen + fcanlen + 2);
	}

	if (port->fp_sym_port_namelen == 0) {
		/* Use the fp devinfo path as the default port name */
		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) ddi_pathname(port->fp_port_dip, pathname);
		port->fp_sym_port_namelen = strlen(pathname);
		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_port_name, pathname,
		    port->fp_sym_port_namelen);
		kmem_free(pathname, MAXPATHLEN);
	}
}
907 
908 
909 /*
910  * Perform port attach
911  */
912 static int
913 fp_attach_handler(dev_info_t *dip)
914 {
915 	int			rval;
916 	int			instance;
917 	int			port_num;
918 	int			port_len;
919 	char			name[30];
920 	char			i_pwwn[17];
921 	fp_cmd_t		*pkt;
922 	uint32_t		ub_count;
923 	fc_local_port_t		*port;
924 	job_request_t		*job;
925 	fc_local_port_t *phyport = NULL;
926 	int portpro1;
927 	char pwwn[17], nwwn[17];
928 
929 	instance = ddi_get_instance(dip);
930 
931 	port_len = sizeof (port_num);
932 
933 	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
934 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
935 	    (caddr_t)&port_num, &port_len);
936 
937 	if (rval != DDI_SUCCESS) {
938 		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
939 		    instance);
940 		return (DDI_FAILURE);
941 	}
942 
943 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
944 	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
945 		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
946 		    instance);
947 		return (DDI_FAILURE);
948 	}
949 
950 	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
951 	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
952 		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
953 		    " point minor node", instance);
954 		ddi_remove_minor_node(dip, NULL);
955 		return (DDI_FAILURE);
956 	}
957 
958 	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
959 	    != DDI_SUCCESS) {
960 		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
961 		    instance);
962 		ddi_remove_minor_node(dip, NULL);
963 		return (DDI_FAILURE);
964 	}
965 	port = ddi_get_soft_state(fp_driver_softstate, instance);
966 
967 	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);
968 
969 	port->fp_instance = instance;
970 	port->fp_ulp_attach = 1;
971 	port->fp_port_num = port_num;
972 	port->fp_verbose = fp_verbosity;
973 	port->fp_options = fp_options;
974 
975 	port->fp_fca_dip = ddi_get_parent(dip);
976 	port->fp_port_dip = dip;
977 	port->fp_fca_tran = (fc_fca_tran_t *)
978 	    ddi_get_driver_private(port->fp_fca_dip);
979 
980 	port->fp_task = port->fp_last_task = FP_TASK_IDLE;
981 
982 	/*
983 	 * Init the starting value of fp_rscn_count. Note that if
984 	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
985 	 * actual # of RSCNs will be (fp_rscn_count - 1)
986 	 */
987 	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
988 
989 	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
990 	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
991 	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
992 
993 	(void) sprintf(name, "fp%d_cache", instance);
994 
995 	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
996 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
997 	    "phyport-instance", -1)) != -1) {
998 		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
999 		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
1000 		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
1001 		port->fp_npiv_type = FC_NPIV_PORT;
1002 	}
1003 
1004 	/*
1005 	 * Allocate the pool of fc_packet_t structs to be used with
1006 	 * this fp instance.
1007 	 */
1008 	port->fp_pkt_cache = kmem_cache_create(name,
1009 	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1010 	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1011 	    NULL, 0);
1012 	port->fp_out_fpcmds = 0;
1013 	if (port->fp_pkt_cache == NULL) {
1014 		goto cache_alloc_failed;
1015 	}
1016 
1017 
1018 	/*
1019 	 * Allocate the d_id and pwwn hash tables for all remote ports
1020 	 * connected to this local port.
1021 	 */
1022 	port->fp_did_table = kmem_zalloc(did_table_size *
1023 	    sizeof (struct d_id_hash), KM_SLEEP);
1024 
1025 	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1026 	    sizeof (struct pwwn_hash), KM_SLEEP);
1027 
1028 	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1029 	    MINCLSYSPRI, 1, 16, 0);
1030 
1031 	/* Indicate that don't have the pm components yet */
1032 	port->fp_soft_state |=	FP_SOFT_NO_PMCOMP;
1033 
1034 	/*
1035 	 * Bind the callbacks with the FCA driver. This will open the gate
1036 	 * for asynchronous callbacks, so after this call the fp_mutex
1037 	 * must be held when updating the fc_local_port_t struct.
1038 	 *
1039 	 * This is done _before_ setting up the job thread so we can avoid
1040 	 * cleaning up after the thread_create() in the error path. This
1041 	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1042 	 */
1043 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1044 		goto bind_callbacks_failed;
1045 	}
1046 
1047 	if (phyport) {
1048 		mutex_enter(&phyport->fp_mutex);
1049 		if (phyport->fp_port_next) {
1050 			phyport->fp_port_next->fp_port_prev = port;
1051 			port->fp_port_next =  phyport->fp_port_next;
1052 			phyport->fp_port_next = port;
1053 			port->fp_port_prev = phyport;
1054 		} else {
1055 			phyport->fp_port_next = port;
1056 			phyport->fp_port_prev = port;
1057 			port->fp_port_next =  phyport;
1058 			port->fp_port_prev = phyport;
1059 		}
1060 		mutex_exit(&phyport->fp_mutex);
1061 	}
1062 
1063 	/*
1064 	 * Init Symbolic Names
1065 	 */
1066 	fp_init_symbolic_names(port);
1067 
1068 	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1069 	    KM_SLEEP, NULL);
1070 
1071 	if (pkt == NULL) {
1072 		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1073 		    instance);
1074 		goto alloc_els_packet_failed;
1075 	}
1076 
1077 	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1078 	    v.v_maxsyspri - 2);
1079 
1080 	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1081 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1082 	    i_pwwn) != DDI_PROP_SUCCESS) {
1083 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1084 		    "fp(%d): Updating 'initiator-port' property"
1085 		    " on fp dev_info node failed", instance);
1086 	}
1087 
1088 	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1089 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1090 	    i_pwwn) != DDI_PROP_SUCCESS) {
1091 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1092 		    "fp(%d): Updating 'initiator-node' property"
1093 		    " on fp dev_info node failed", instance);
1094 	}
1095 
1096 	mutex_enter(&port->fp_mutex);
1097 	port->fp_els_resp_pkt = pkt;
1098 	mutex_exit(&port->fp_mutex);
1099 
1100 	/*
1101 	 * Determine the count of unsolicited buffers this FCA can support
1102 	 */
1103 	fp_retrieve_caps(port);
1104 
1105 	/*
1106 	 * Allocate unsolicited buffer tokens
1107 	 */
1108 	if (port->fp_ub_count) {
1109 		ub_count = port->fp_ub_count;
1110 		port->fp_ub_tokens = kmem_zalloc(ub_count *
1111 		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
1112 		/*
1113 		 * Do not fail the attach if unsolicited buffer allocation
1114 		 * fails; Just try to get along with whatever the FCA can do.
1115 		 */
1116 		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1117 		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1118 		    FC_SUCCESS || ub_count != port->fp_ub_count) {
1119 			cmn_err(CE_WARN, "fp(%d): failed to allocate "
1120 			    " Unsolicited buffers. proceeding with attach...",
1121 			    instance);
1122 			kmem_free(port->fp_ub_tokens,
1123 			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1124 			port->fp_ub_tokens = NULL;
1125 		}
1126 	}
1127 
1128 	fp_load_ulp_modules(dip, port);
1129 
1130 	/*
1131 	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1132 	 */
1133 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1134 	    "pm-hardware-state", "needs-suspend-resume",
1135 	    strlen("needs-suspend-resume") + 1);
1136 
1137 	/*
1138 	 * fctl maintains a list of all port handles, so
1139 	 * help fctl add this one to its list now.
1140 	 */
1141 	mutex_enter(&port->fp_mutex);
1142 	fctl_add_port(port);
1143 
1144 	/*
1145 	 * If a state change is already in progress, set the bind state t
1146 	 * OFFLINE as well, so further state change callbacks into ULPs
1147 	 * will pass the appropriate states
1148 	 */
1149 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1150 	    port->fp_statec_busy) {
1151 		port->fp_bind_state = FC_STATE_OFFLINE;
1152 		mutex_exit(&port->fp_mutex);
1153 
1154 		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1155 	} else {
1156 		/*
1157 		 * Without dropping the mutex, ensure that the port
1158 		 * startup happens ahead of state change callback
1159 		 * processing
1160 		 */
1161 		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1162 
1163 		port->fp_last_task = port->fp_task;
1164 		port->fp_task = FP_TASK_PORT_STARTUP;
1165 
1166 		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1167 		    fp_startup_done, (opaque_t)port, KM_SLEEP);
1168 
1169 		port->fp_job_head = port->fp_job_tail = job;
1170 
1171 		cv_signal(&port->fp_cv);
1172 
1173 		mutex_exit(&port->fp_mutex);
1174 	}
1175 
1176 	mutex_enter(&port->fp_mutex);
1177 	while (port->fp_ulp_attach) {
1178 		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1179 	}
1180 	mutex_exit(&port->fp_mutex);
1181 
1182 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1183 	    "pm-components", fp_pm_comps,
1184 	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1185 	    DDI_PROP_SUCCESS) {
1186 		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1187 		    " components property, PM disabled on this port.");
1188 		mutex_enter(&port->fp_mutex);
1189 		port->fp_pm_level = FP_PM_PORT_UP;
1190 		mutex_exit(&port->fp_mutex);
1191 	} else {
1192 		if (pm_raise_power(dip, FP_PM_COMPONENT,
1193 		    FP_PM_PORT_UP) != DDI_SUCCESS) {
1194 			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1195 			    " power level");
1196 			mutex_enter(&port->fp_mutex);
1197 			port->fp_pm_level = FP_PM_PORT_UP;
1198 			mutex_exit(&port->fp_mutex);
1199 		}
1200 
1201 		/*
1202 		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1203 		 * the call to pm_raise_power.	The PM framework can't
1204 		 * handle multiple threads calling into it during attach.
1205 		 */
1206 
1207 		mutex_enter(&port->fp_mutex);
1208 		port->fp_soft_state &=	~FP_SOFT_NO_PMCOMP;
1209 		mutex_exit(&port->fp_mutex);
1210 	}
1211 
1212 	ddi_report_dev(dip);
1213 
1214 	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1215 
1216 	return (DDI_SUCCESS);
1217 
1218 	/*
1219 	 * Unwind any/all preceeding allocations in the event of an error.
1220 	 */
1221 
1222 alloc_els_packet_failed:
1223 
1224 	if (port->fp_fca_handle != NULL) {
1225 		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1226 		port->fp_fca_handle = NULL;
1227 	}
1228 
1229 	if (port->fp_ub_tokens != NULL) {
1230 		(void) fc_ulp_ubfree(port, port->fp_ub_count,
1231 		    port->fp_ub_tokens);
1232 		kmem_free(port->fp_ub_tokens,
1233 		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1234 		port->fp_ub_tokens = NULL;
1235 	}
1236 
1237 	if (port->fp_els_resp_pkt != NULL) {
1238 		fp_free_pkt(port->fp_els_resp_pkt);
1239 		port->fp_els_resp_pkt = NULL;
1240 	}
1241 
1242 bind_callbacks_failed:
1243 
1244 	if (port->fp_taskq != NULL) {
1245 		taskq_destroy(port->fp_taskq);
1246 	}
1247 
1248 	if (port->fp_pwwn_table != NULL) {
1249 		kmem_free(port->fp_pwwn_table,
1250 		    pwwn_table_size * sizeof (struct pwwn_hash));
1251 		port->fp_pwwn_table = NULL;
1252 	}
1253 
1254 	if (port->fp_did_table != NULL) {
1255 		kmem_free(port->fp_did_table,
1256 		    did_table_size * sizeof (struct d_id_hash));
1257 		port->fp_did_table = NULL;
1258 	}
1259 
1260 	if (port->fp_pkt_cache != NULL) {
1261 		kmem_cache_destroy(port->fp_pkt_cache);
1262 		port->fp_pkt_cache = NULL;
1263 	}
1264 
1265 cache_alloc_failed:
1266 
1267 	cv_destroy(&port->fp_attach_cv);
1268 	cv_destroy(&port->fp_cv);
1269 	mutex_destroy(&port->fp_mutex);
1270 	ddi_remove_minor_node(port->fp_port_dip, NULL);
1271 	ddi_soft_state_free(fp_driver_softstate, instance);
1272 	ddi_prop_remove_all(dip);
1273 
1274 	return (DDI_FAILURE);
1275 }
1276 
1277 
1278 /*
1279  * Handle DDI_RESUME request
1280  */
1281 static int
1282 fp_resume_handler(dev_info_t *dip)
1283 {
1284 	int		rval;
1285 	fc_local_port_t *port;
1286 
1287 	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
1288 
1289 	ASSERT(port != NULL);
1290 
1291 #ifdef	DEBUG
1292 	mutex_enter(&port->fp_mutex);
1293 	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
1294 	mutex_exit(&port->fp_mutex);
1295 #endif
1296 
1297 	/*
1298 	 * If the port was power suspended, raise the power level
1299 	 */
1300 	mutex_enter(&port->fp_mutex);
1301 	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
1302 	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
1303 		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
1304 
1305 		mutex_exit(&port->fp_mutex);
1306 		if (pm_raise_power(dip, FP_PM_COMPONENT,
1307 		    FP_PM_PORT_UP) != DDI_SUCCESS) {
1308 			FP_TRACE(FP_NHEAD2(9, 0),
1309 			    "Failed to raise the power level");
1310 			return (DDI_FAILURE);
1311 		}
1312 		mutex_enter(&port->fp_mutex);
1313 	}
1314 	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
1315 	mutex_exit(&port->fp_mutex);
1316 
1317 	/*
1318 	 * All the discovery is initiated and handled by per-port thread.
1319 	 * Further all the discovery is done in handled in callback mode
1320 	 * (not polled mode); In a specific case such as this, the discovery
1321 	 * is required to happen in polled mode. The easiest way out is
1322 	 * to bail out port thread and get started. Come back and fix this
1323 	 * to do on demand discovery initiated by ULPs. ULPs such as FCP
1324 	 * will do on-demand discovery during pre-power-up busctl handling
1325 	 * which will only be possible when SCSA provides a new HBA vector
1326 	 * for sending down the PM busctl requests.
1327 	 */
1328 	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);
1329 
1330 	rval = fp_resume_all(port, FC_CMD_RESUME);
1331 	if (rval != DDI_SUCCESS) {
1332 		mutex_enter(&port->fp_mutex);
1333 		port->fp_soft_state |= FP_SOFT_SUSPEND;
1334 		mutex_exit(&port->fp_mutex);
1335 		(void) callb_generic_cpr(&port->fp_cpr_info,
1336 		    CB_CODE_CPR_CHKPT);
1337 	}
1338 
1339 	return (rval);
1340 }
1341 
1342 /*
1343  * Perform FC Port power on initialization
1344  */
1345 static int
1346 fp_power_up(fc_local_port_t *port)
1347 {
1348 	int	rval;
1349 
1350 	ASSERT(MUTEX_HELD(&port->fp_mutex));
1351 
1352 	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1353 	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1354 
1355 	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1356 
1357 	mutex_exit(&port->fp_mutex);
1358 
1359 	rval = fp_resume_all(port, FC_CMD_POWER_UP);
1360 	if (rval != DDI_SUCCESS) {
1361 		mutex_enter(&port->fp_mutex);
1362 		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1363 	} else {
1364 		mutex_enter(&port->fp_mutex);
1365 	}
1366 
1367 	return (rval);
1368 }
1369 
1370 
1371 /*
1372  * It is important to note that the power may possibly be removed between
1373  * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1374  * FC port hardware would have gone through an OFFLINE to ONLINE transition
1375  * (hardware state). In this case, the port driver may need to rediscover the
1376  * topology, perform LOGINs, register with the name server again and perform
1377  * any such port initialization procedures. To perform LOGINs, the driver could
1378  * use the port device handle to see if a LOGIN needs to be performed and use
1379  * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1380  * or removed) which will be reflected in the map the ULPs will see.
1381  */
1382 static int
1383 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
1384 {
1385 
1386 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1387 
1388 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1389 		return (DDI_FAILURE);
1390 	}
1391 
1392 	mutex_enter(&port->fp_mutex);
1393 
1394 	/*
1395 	 * If there are commands queued for delayed retry, instead of
1396 	 * working the hard way to figure out which ones are good for
1397 	 * restart and which ones not (ELSs are definitely not good
1398 	 * as the port will have to go through a new spin of rediscovery
1399 	 * now), so just flush them out.
1400 	 */
1401 	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
1402 		fp_cmd_t	*cmd;
1403 
1404 		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;
1405 
1406 		mutex_exit(&port->fp_mutex);
1407 		while ((cmd = fp_deque_cmd(port)) != NULL) {
1408 			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
1409 			fp_iodone(cmd);
1410 		}
1411 		mutex_enter(&port->fp_mutex);
1412 	}
1413 
1414 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
1415 		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
1416 		    port->fp_dev_count) {
1417 			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1418 			port->fp_offline_tid = timeout(fp_offline_timeout,
1419 			    (caddr_t)port, fp_offline_ticks);
1420 		}
1421 		if (port->fp_job_head) {
1422 			cv_signal(&port->fp_cv);
1423 		}
1424 		mutex_exit(&port->fp_mutex);
1425 		fctl_attach_ulps(port, cmd, &modlinkage);
1426 	} else {
1427 		struct job_request *job;
1428 
1429 		/*
1430 		 * If an OFFLINE timer was running at the time of
1431 		 * suspending, there is no need to restart it as
1432 		 * the port is ONLINE now.
1433 		 */
1434 		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1435 		if (port->fp_statec_busy == 0) {
1436 			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
1437 		}
1438 		port->fp_statec_busy++;
1439 		mutex_exit(&port->fp_mutex);
1440 
1441 		job = fctl_alloc_job(JOB_PORT_ONLINE,
1442 		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
1443 		fctl_enque_job(port, job);
1444 
1445 		fctl_jobwait(job);
1446 		fctl_remove_oldies(port);
1447 
1448 		fctl_attach_ulps(port, cmd, &modlinkage);
1449 		fctl_dealloc_job(job);
1450 	}
1451 
1452 	return (DDI_SUCCESS);
1453 }
1454 
1455 
1456 /*
1457  * At this time, there shouldn't be any I/O requests on this port.
1458  * But the unsolicited callbacks from the underlying FCA port need
1459  * to be handled very carefully. The steps followed to handle the
1460  * DDI_DETACH are:
1461  *	+	Grab the port driver mutex, check if the unsolicited
1462  *		callback is currently under processing. If true, fail
1463  *		the DDI_DETACH request by printing a message; If false
1464  *		mark the DDI_DETACH as under progress, so that any
1465  *		further unsolicited callbacks get bounced.
1466  *	+	Perform PRLO/LOGO if necessary, cleanup all the data
1467  *		structures.
1468  *	+	Get the job_handler thread to gracefully exit.
1469  *	+	Unregister callbacks with the FCA port.
1470  *	+	Now that some peace is found, notify all the ULPs of
1471  *		DDI_DETACH request (using ulp_port_detach entry point)
1472  *	+	Free all mutexes, semaphores, conditional variables.
1473  *	+	Free the soft state, return success.
1474  *
1475  * Important considerations:
1476  *		Port driver de-registers state change and unsolicited
1477  *		callbacks before taking up the task of notifying ULPs
1478  *		and performing PRLO and LOGOs.
1479  *
1480  *		A port may go offline at the time PRLO/LOGO is being
1481  *		requested. It is expected of all FCA drivers to fail
1482  *		such requests either immediately with a FC_OFFLINE
1483  *		return code to fc_fca_transport() or return the packet
1484  *		asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1485  */
1486 static int
1487 fp_detach_handler(fc_local_port_t *port)
1488 {
1489 	job_request_t	*job;
1490 	uint32_t	delay_count;
1491 	fc_orphan_t	*orp, *tmporp;
1492 
1493 	/*
1494 	 * In a Fabric topology with many host ports connected to
1495 	 * a switch, another detaching instance of fp might have
1496 	 * triggered a LOGO (which is an unsolicited request to
1497 	 * this instance). So in order to be able to successfully
1498 	 * detach by taking care of such cases a delay of about
1499 	 * 30 seconds is introduced.
1500 	 */
1501 	delay_count = 0;
1502 	mutex_enter(&port->fp_mutex);
1503 	if (port->fp_out_fpcmds != 0) {
1504 		/*
1505 		 * At this time we can only check fp internal commands, because
1506 		 * sd/ssd/scsi_vhci should have finsihed all their commands,
1507 		 * fcp/fcip/fcsm should have finished all their commands.
1508 		 *
1509 		 * It seems that all fp internal commands are asynchronous now.
1510 		 */
1511 		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1512 		mutex_exit(&port->fp_mutex);
1513 
1514 		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
1515 		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
1516 		return (DDI_FAILURE);
1517 	}
1518 
1519 	while ((port->fp_soft_state &
1520 	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
1521 	    (delay_count < 30)) {
1522 		mutex_exit(&port->fp_mutex);
1523 		delay_count++;
1524 		delay(drv_usectohz(1000000));
1525 		mutex_enter(&port->fp_mutex);
1526 	}
1527 
1528 	if (port->fp_soft_state &
1529 	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
1530 		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1531 		mutex_exit(&port->fp_mutex);
1532 
1533 		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1534 		    " Failing detach", port->fp_instance);
1535 		return (DDI_FAILURE);
1536 	}
1537 
1538 	port->fp_soft_state |= FP_SOFT_IN_DETACH;
1539 	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1540 	mutex_exit(&port->fp_mutex);
1541 
1542 	/*
1543 	 * If we're powered down, we need to raise power prior to submitting
1544 	 * the JOB_PORT_SHUTDOWN job.  Otherwise, the job handler will never
1545 	 * process the shutdown job.
1546 	 */
1547 	if (fctl_busy_port(port) != 0) {
1548 		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
1549 		    port->fp_instance);
1550 		mutex_enter(&port->fp_mutex);
1551 		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
1552 		mutex_exit(&port->fp_mutex);
1553 		return (DDI_FAILURE);
1554 	}
1555 
1556 	/*
1557 	 * This will deallocate data structs and cause the "job" thread
1558 	 * to exit, in preparation for DDI_DETACH on the instance.
1559 	 * This can sleep for an arbitrary duration, since it waits for
1560 	 * commands over the wire, timeout(9F) callbacks, etc.
1561 	 *
1562 	 * CAUTION: There is still a race here, where the "job" thread
1563 	 * can still be executing code even tho the fctl_jobwait() call
1564 	 * below has returned to us.  In theory the fp driver could even be
1565 	 * modunloaded even tho the job thread isn't done executing.
1566 	 * without creating the race condition.
1567 	 */
1568 	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
1569 	    (opaque_t)port, KM_SLEEP);
1570 	fctl_enque_job(port, job);
1571 	fctl_jobwait(job);
1572 	fctl_dealloc_job(job);
1573 
1574 
1575 	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
1576 	    FP_PM_PORT_DOWN);
1577 
1578 	if (port->fp_taskq) {
1579 		taskq_destroy(port->fp_taskq);
1580 	}
1581 
1582 	ddi_prop_remove_all(port->fp_port_dip);
1583 
1584 	ddi_remove_minor_node(port->fp_port_dip, NULL);
1585 
1586 	fctl_remove_port(port);
1587 
1588 	fp_free_pkt(port->fp_els_resp_pkt);
1589 
1590 	if (port->fp_ub_tokens) {
1591 		if (fc_ulp_ubfree(port, port->fp_ub_count,
1592 		    port->fp_ub_tokens) != FC_SUCCESS) {
1593 			cmn_err(CE_WARN, "fp(%d): couldn't free "
1594 			    " unsolicited buffers", port->fp_instance);
1595 		}
1596 		kmem_free(port->fp_ub_tokens,
1597 		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1598 		port->fp_ub_tokens = NULL;
1599 	}
1600 
1601 	if (port->fp_pkt_cache != NULL) {
1602 		kmem_cache_destroy(port->fp_pkt_cache);
1603 	}
1604 
1605 	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1606 
1607 	mutex_enter(&port->fp_mutex);
1608 	if (port->fp_did_table) {
1609 		kmem_free(port->fp_did_table, did_table_size *
1610 		    sizeof (struct d_id_hash));
1611 	}
1612 
1613 	if (port->fp_pwwn_table) {
1614 		kmem_free(port->fp_pwwn_table, pwwn_table_size *
1615 		    sizeof (struct pwwn_hash));
1616 	}
1617 	orp = port->fp_orphan_list;
1618 	while (orp) {
1619 		tmporp = orp;
1620 		orp = orp->orp_next;
1621 		kmem_free(tmporp, sizeof (*orp));
1622 	}
1623 
1624 	mutex_exit(&port->fp_mutex);
1625 
1626 	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);
1627 
1628 	mutex_destroy(&port->fp_mutex);
1629 	cv_destroy(&port->fp_attach_cv);
1630 	cv_destroy(&port->fp_cv);
1631 	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);
1632 
1633 	return (DDI_SUCCESS);
1634 }
1635 
1636 
1637 /*
1638  * Steps to perform DDI_SUSPEND operation on a FC port
1639  *
1640  *	- If already suspended return DDI_FAILURE
1641  *	- If already power-suspended return DDI_SUCCESS
1642  *	- If an unsolicited callback or state change handling is in
1643  *	    in progress, throw a warning message, return DDI_FAILURE
1644  *	- Cancel timeouts
1645  *	- SUSPEND the job_handler thread (means do nothing as it is
1646  *	    taken care of by the CPR frame work)
1647  */
1648 static int
1649 fp_suspend_handler(fc_local_port_t *port)
1650 {
1651 	uint32_t	delay_count;
1652 
1653 	mutex_enter(&port->fp_mutex);
1654 
1655 	/*
1656 	 * The following should never happen, but
1657 	 * let the driver be more defensive here
1658 	 */
1659 	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1660 		mutex_exit(&port->fp_mutex);
1661 		return (DDI_FAILURE);
1662 	}
1663 
1664 	/*
1665 	 * If the port is already power suspended, there
1666 	 * is nothing else to do, So return DDI_SUCCESS,
1667 	 * but mark the SUSPEND bit in the soft state
1668 	 * before leaving.
1669 	 */
1670 	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1671 		port->fp_soft_state |= FP_SOFT_SUSPEND;
1672 		mutex_exit(&port->fp_mutex);
1673 		return (DDI_SUCCESS);
1674 	}
1675 
1676 	/*
1677 	 * Check if an unsolicited callback or state change handling is
1678 	 * in progress. If true, fail the suspend operation; also throw
1679 	 * a warning message notifying the failure. Note that Sun PCI
1680 	 * hotplug spec recommends messages in cases of failure (but
1681 	 * not flooding the console)
1682 	 *
1683 	 * Busy waiting for a short interval (500 millisecond ?) to see
1684 	 * if the callback processing completes may be another idea. Since
1685 	 * most of the callback processing involves a lot of work, it
1686 	 * is safe to just fail the SUSPEND operation. It is definitely
1687 	 * not bad to fail the SUSPEND operation if the driver is busy.
1688 	 */
1689 	delay_count = 0;
1690 	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1691 	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
1692 		mutex_exit(&port->fp_mutex);
1693 		delay_count++;
1694 		delay(drv_usectohz(1000000));
1695 		mutex_enter(&port->fp_mutex);
1696 	}
1697 
1698 	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1699 	    FP_SOFT_IN_UNSOL_CB)) {
1700 		mutex_exit(&port->fp_mutex);
1701 		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1702 		    " Failing suspend", port->fp_instance);
1703 		return (DDI_FAILURE);
1704 	}
1705 
1706 	/*
1707 	 * Check of FC port thread is busy
1708 	 */
1709 	if (port->fp_job_head) {
1710 		mutex_exit(&port->fp_mutex);
1711 		FP_TRACE(FP_NHEAD2(9, 0),
1712 		    "FC port thread is busy: Failing suspend");
1713 		return (DDI_FAILURE);
1714 	}
1715 	port->fp_soft_state |= FP_SOFT_SUSPEND;
1716 
1717 	fp_suspend_all(port);
1718 	mutex_exit(&port->fp_mutex);
1719 
1720 	return (DDI_SUCCESS);
1721 }
1722 
1723 
1724 /*
1725  * Prepare for graceful power down of a FC port
1726  */
1727 static int
1728 fp_power_down(fc_local_port_t *port)
1729 {
1730 	ASSERT(MUTEX_HELD(&port->fp_mutex));
1731 
1732 	/*
1733 	 * Power down request followed by a DDI_SUSPEND should
1734 	 * never happen; If it does return DDI_SUCCESS
1735 	 */
1736 	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1737 		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1738 		return (DDI_SUCCESS);
1739 	}
1740 
1741 	/*
1742 	 * If the port is already power suspended, there
1743 	 * is nothing else to do, So return DDI_SUCCESS,
1744 	 */
1745 	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1746 		return (DDI_SUCCESS);
1747 	}
1748 
1749 	/*
1750 	 * Check if an unsolicited callback or state change handling
1751 	 * is in progress. If true, fail the PM suspend operation.
1752 	 * But don't print a message unless the verbosity of the
1753 	 * driver desires otherwise.
1754 	 */
1755 	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
1756 	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
1757 		FP_TRACE(FP_NHEAD2(9, 0),
1758 		    "Unsolicited callback in progress: Failing power down");
1759 		return (DDI_FAILURE);
1760 	}
1761 
1762 	/*
1763 	 * Check of FC port thread is busy
1764 	 */
1765 	if (port->fp_job_head) {
1766 		FP_TRACE(FP_NHEAD2(9, 0),
1767 		    "FC port thread is busy: Failing power down");
1768 		return (DDI_FAILURE);
1769 	}
1770 	port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1771 
1772 	/*
1773 	 * check if the ULPs are ready for power down
1774 	 */
1775 	mutex_exit(&port->fp_mutex);
1776 	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
1777 	    &modlinkage) != FC_SUCCESS) {
1778 		mutex_enter(&port->fp_mutex);
1779 		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1780 		mutex_exit(&port->fp_mutex);
1781 
1782 		/*
1783 		 * Power back up the obedient ULPs that went down
1784 		 */
1785 		fp_attach_ulps(port, FC_CMD_POWER_UP);
1786 
1787 		FP_TRACE(FP_NHEAD2(9, 0),
1788 		    "ULP(s) busy, detach_ulps failed. Failing power down");
1789 		mutex_enter(&port->fp_mutex);
1790 		return (DDI_FAILURE);
1791 	}
1792 	mutex_enter(&port->fp_mutex);
1793 
1794 	fp_suspend_all(port);
1795 
1796 	return (DDI_SUCCESS);
1797 }
1798 
1799 
1800 /*
1801  * Suspend the entire FC port
1802  */
1803 static void
1804 fp_suspend_all(fc_local_port_t *port)
1805 {
1806 	int			index;
1807 	struct pwwn_hash	*head;
1808 	fc_remote_port_t	*pd;
1809 
1810 	ASSERT(MUTEX_HELD(&port->fp_mutex));
1811 
1812 	if (port->fp_wait_tid != 0) {
1813 		timeout_id_t	tid;
1814 
1815 		tid = port->fp_wait_tid;
1816 		port->fp_wait_tid = (timeout_id_t)NULL;
1817 		mutex_exit(&port->fp_mutex);
1818 		(void) untimeout(tid);
1819 		mutex_enter(&port->fp_mutex);
1820 		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
1821 	}
1822 
1823 	if (port->fp_offline_tid) {
1824 		timeout_id_t	tid;
1825 
1826 		tid = port->fp_offline_tid;
1827 		port->fp_offline_tid = (timeout_id_t)NULL;
1828 		mutex_exit(&port->fp_mutex);
1829 		(void) untimeout(tid);
1830 		mutex_enter(&port->fp_mutex);
1831 		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
1832 	}
1833 	mutex_exit(&port->fp_mutex);
1834 	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1835 	mutex_enter(&port->fp_mutex);
1836 
1837 	/*
1838 	 * Mark all devices as OLD, and reset the LOGIN state as well
1839 	 * (this will force the ULPs to perform a LOGIN after calling
1840 	 * fc_portgetmap() during RESUME/PM_RESUME)
1841 	 */
1842 	for (index = 0; index < pwwn_table_size; index++) {
1843 		head = &port->fp_pwwn_table[index];
1844 		pd = head->pwwn_head;
1845 		while (pd != NULL) {
1846 			mutex_enter(&pd->pd_mutex);
1847 			fp_remote_port_offline(pd);
1848 			fctl_delist_did_table(port, pd);
1849 			pd->pd_state = PORT_DEVICE_VALID;
1850 			pd->pd_login_count = 0;
1851 			mutex_exit(&pd->pd_mutex);
1852 			pd = pd->pd_wwn_hnext;
1853 		}
1854 	}
1855 }
1856 
1857 
1858 /*
1859  * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
1860  * Performs intializations for fc_packet_t structs.
1861  * Returns 0 for success or -1 for failure.
1862  *
1863  * This function allocates DMA handles for both command and responses.
1864  * Most of the ELSs used have both command and responses so it is strongly
1865  * desired to move them to cache constructor routine.
1866  *
1867  * Context: Can sleep iff called with KM_SLEEP flag.
1868  */
1869 static int
1870 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1871 {
1872 	int		(*cb) (caddr_t);
1873 	fc_packet_t	*pkt;
1874 	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
1875 	fc_local_port_t *port = (fc_local_port_t *)cdarg;
1876 
1877 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1878 
1879 	cmd->cmd_next = NULL;
1880 	cmd->cmd_flags = 0;
1881 	cmd->cmd_dflags = 0;
1882 	cmd->cmd_job = NULL;
1883 	cmd->cmd_port = port;
1884 	pkt = &cmd->cmd_pkt;
1885 
1886 	if (ddi_dma_alloc_handle(port->fp_fca_dip,
1887 	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1888 	    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1889 		return (-1);
1890 	}
1891 
1892 	if (ddi_dma_alloc_handle(port->fp_fca_dip,
1893 	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1894 	    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1895 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1896 		return (-1);
1897 	}
1898 
1899 	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1900 	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1901 	    pkt->pkt_data_cookie_cnt = 0;
1902 	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1903 	    pkt->pkt_data_cookie = NULL;
1904 	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1905 
1906 	return (0);
1907 }
1908 
1909 
1910 /*
1911  * fp_cache_destructor: Destructor function for kmem_cache_create().
1912  * Performs un-intializations for fc_packet_t structs.
1913  */
1914 /* ARGSUSED */
1915 static void
1916 fp_cache_destructor(void *buf, void *cdarg)
1917 {
1918 	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
1919 	fc_packet_t	*pkt;
1920 
1921 	pkt = &cmd->cmd_pkt;
1922 	if (pkt->pkt_cmd_dma) {
1923 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1924 	}
1925 
1926 	if (pkt->pkt_resp_dma) {
1927 		ddi_dma_free_handle(&pkt->pkt_resp_dma);
1928 	}
1929 }
1930 
1931 
1932 /*
1933  * Packet allocation for ELS and any other port driver commands
1934  *
1935  * Some ELSs like FLOGI and PLOGI are critical for topology and
1936  * device discovery and a system's inability to allocate memory
1937  * or DVMA resources while performing some of these critical ELSs
1938  * cause a lot of problem. While memory allocation failures are
1939  * rare, DVMA resource failures are common as the applications
1940  * are becoming more and more powerful on huge servers.	 So it
1941  * is desirable to have a framework support to reserve a fragment
1942  * of DVMA. So until this is fixed the correct way, the suffering
1943  * is huge whenever a LIP happens at a time DVMA resources are
1944  * drained out completely - So an attempt needs to be made to
1945  * KM_SLEEP while requesting for these resources, hoping that
1946  * the requests won't hang forever.
1947  *
1948  * The fc_remote_port_t argument is stored into the pkt_pd field in the
1949  * fc_packet_t struct prior to the fc_ulp_init_packet() call.  This
1950  * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1951  * If there is no fc_remote_port_t associated with the fc_packet_t, then
1952  * fp_alloc_pkt() must be called with pd set to NULL.
1953  */
1954 
1955 static fp_cmd_t *
1956 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1957     fc_remote_port_t *pd)
1958 {
1959 	int		rval;
1960 	ulong_t		real_len;
1961 	fp_cmd_t	*cmd;
1962 	fc_packet_t	*pkt;
1963 	int		(*cb) (caddr_t);
1964 	ddi_dma_cookie_t	pkt_cookie;
1965 	ddi_dma_cookie_t	*cp;
1966 	uint32_t		cnt;
1967 
1968 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1969 
1970 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1971 
1972 	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1973 	if (cmd == NULL) {
1974 		return (cmd);
1975 	}
1976 
1977 	cmd->cmd_ulp_pkt = NULL;
1978 	cmd->cmd_flags = 0;
1979 	pkt = &cmd->cmd_pkt;
1980 	ASSERT(cmd->cmd_dflags == 0);
1981 
1982 	pkt->pkt_datalen = 0;
1983 	pkt->pkt_data = NULL;
1984 	pkt->pkt_state = 0;
1985 	pkt->pkt_action = 0;
1986 	pkt->pkt_reason = 0;
1987 	pkt->pkt_expln = 0;
1988 
1989 	/*
1990 	 * Init pkt_pd with the given pointer; this must be done _before_
1991 	 * the call to fc_ulp_init_packet().
1992 	 */
1993 	pkt->pkt_pd = pd;
1994 
1995 	/* Now call the FCA driver to init its private, per-packet fields */
1996 	if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
1997 		goto alloc_pkt_failed;
1998 	}
1999 
2000 	if (cmd_len) {
2001 		ASSERT(pkt->pkt_cmd_dma != NULL);
2002 
2003 		rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2004 		    port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2005 		    cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2006 		    &pkt->pkt_cmd_acc);
2007 
2008 		if (rval != DDI_SUCCESS) {
2009 			goto alloc_pkt_failed;
2010 		}
2011 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2012 
2013 		if (real_len < cmd_len) {
2014 			goto alloc_pkt_failed;
2015 		}
2016 
2017 		rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2018 		    pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2019 		    DDI_DMA_CONSISTENT, cb, NULL,
2020 		    &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2021 
2022 		if (rval != DDI_DMA_MAPPED) {
2023 			goto alloc_pkt_failed;
2024 		}
2025 
2026 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2027 
2028 		if (pkt->pkt_cmd_cookie_cnt >
2029 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2030 			goto alloc_pkt_failed;
2031 		}
2032 
2033 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2034 
2035 		cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2036 		    pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2037 		    KM_NOSLEEP);
2038 
2039 		if (cp == NULL) {
2040 			goto alloc_pkt_failed;
2041 		}
2042 
2043 		*cp = pkt_cookie;
2044 		cp++;
2045 		for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2046 			ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2047 			*cp = pkt_cookie;
2048 		}
2049 	}
2050 
2051 	if (resp_len) {
2052 		ASSERT(pkt->pkt_resp_dma != NULL);
2053 
2054 		rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2055 		    port->fp_fca_tran->fca_acc_attr,
2056 		    DDI_DMA_CONSISTENT, cb, NULL,
2057 		    (caddr_t *)&pkt->pkt_resp, &real_len,
2058 		    &pkt->pkt_resp_acc);
2059 
2060 		if (rval != DDI_SUCCESS) {
2061 			goto alloc_pkt_failed;
2062 		}
2063 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2064 
2065 		if (real_len < resp_len) {
2066 			goto alloc_pkt_failed;
2067 		}
2068 
2069 		rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2070 		    pkt->pkt_resp, real_len, DDI_DMA_READ |
2071 		    DDI_DMA_CONSISTENT, cb, NULL,
2072 		    &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2073 
2074 		if (rval != DDI_DMA_MAPPED) {
2075 			goto alloc_pkt_failed;
2076 		}
2077 
2078 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2079 
2080 		if (pkt->pkt_resp_cookie_cnt >
2081 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2082 			goto alloc_pkt_failed;
2083 		}
2084 
2085 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2086 
2087 		cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2088 		    pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2089 		    KM_NOSLEEP);
2090 
2091 		if (cp == NULL) {
2092 			goto alloc_pkt_failed;
2093 		}
2094 
2095 		*cp = pkt_cookie;
2096 		cp++;
2097 		for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2098 			ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2099 			*cp = pkt_cookie;
2100 		}
2101 	}
2102 
2103 	pkt->pkt_cmdlen = cmd_len;
2104 	pkt->pkt_rsplen = resp_len;
2105 	pkt->pkt_ulp_private = cmd;
2106 
2107 	return (cmd);
2108 
2109 alloc_pkt_failed:
2110 
2111 	fp_free_dma(cmd);
2112 
2113 	if (pkt->pkt_cmd_cookie != NULL) {
2114 		kmem_free(pkt->pkt_cmd_cookie,
2115 		    pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2116 		pkt->pkt_cmd_cookie = NULL;
2117 	}
2118 
2119 	if (pkt->pkt_resp_cookie != NULL) {
2120 		kmem_free(pkt->pkt_resp_cookie,
2121 		    pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2122 		pkt->pkt_resp_cookie = NULL;
2123 	}
2124 
2125 	kmem_cache_free(port->fp_pkt_cache, cmd);
2126 
2127 	return (NULL);
2128 }
2129 
2130 
2131 /*
2132  * Free FC packet
2133  */
2134 static void
2135 fp_free_pkt(fp_cmd_t *cmd)
2136 {
2137 	fc_local_port_t *port;
2138 	fc_packet_t	*pkt;
2139 
2140 	ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2141 
2142 	cmd->cmd_next = NULL;
2143 	cmd->cmd_job = NULL;
2144 	pkt = &cmd->cmd_pkt;
2145 	pkt->pkt_ulp_private = 0;
2146 	pkt->pkt_tran_flags = 0;
2147 	pkt->pkt_tran_type = 0;
2148 	port = cmd->cmd_port;
2149 
2150 	if (pkt->pkt_cmd_cookie != NULL) {
2151 		kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2152 		    sizeof (ddi_dma_cookie_t));
2153 		pkt->pkt_cmd_cookie = NULL;
2154 	}
2155 
2156 	if (pkt->pkt_resp_cookie != NULL) {
2157 		kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2158 		    sizeof (ddi_dma_cookie_t));
2159 		pkt->pkt_resp_cookie = NULL;
2160 	}
2161 
2162 	fp_free_dma(cmd);
2163 	(void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2164 	kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2165 }
2166 
2167 
2168 /*
2169  * Release DVMA resources
2170  */
2171 static void
2172 fp_free_dma(fp_cmd_t *cmd)
2173 {
2174 	fc_packet_t *pkt = &cmd->cmd_pkt;
2175 
2176 	pkt->pkt_cmdlen = 0;
2177 	pkt->pkt_rsplen = 0;
2178 	pkt->pkt_tran_type = 0;
2179 	pkt->pkt_tran_flags = 0;
2180 
2181 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2182 		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2183 	}
2184 
2185 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2186 		if (pkt->pkt_cmd_acc) {
2187 			ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2188 		}
2189 	}
2190 
2191 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2192 		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2193 	}
2194 
2195 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2196 		if (pkt->pkt_resp_acc) {
2197 			ddi_dma_mem_free(&pkt->pkt_resp_acc);
2198 		}
2199 	}
2200 	cmd->cmd_dflags = 0;
2201 }
2202 
2203 
2204 /*
2205  * Dedicated thread to perform various activities.  One thread for
2206  * each fc_local_port_t (driver soft state) instance.
2207  * Note, this effectively works out to one thread for each local
2208  * port, but there are also some Solaris taskq threads in use on a per-local
2209  * port basis; these also need to be taken into consideration.
2210  */
2211 static void
2212 fp_job_handler(fc_local_port_t *port)
2213 {
2214 	int			rval;
2215 	uint32_t		*d_id;
2216 	fc_remote_port_t	*pd;
2217 	job_request_t		*job;
2218 
2219 #ifndef	__lock_lint
2220 	/*
2221 	 * Solaris-internal stuff for proper operation of kernel threads
2222 	 * with Solaris CPR.
2223 	 */
2224 	CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2225 	    callb_generic_cpr, "fp_job_handler");
2226 #endif
2227 
2228 
2229 	/* Loop forever waiting for work to do */
2230 	for (;;) {
2231 
2232 		mutex_enter(&port->fp_mutex);
2233 
2234 		/*
2235 		 * Sleep if no work to do right now, or if we want
2236 		 * to suspend or power-down.
2237 		 */
2238 		while (port->fp_job_head == NULL ||
2239 		    (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2240 		    FP_SOFT_SUSPEND))) {
2241 			CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2242 			cv_wait(&port->fp_cv, &port->fp_mutex);
2243 			CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2244 		}
2245 
2246 		/*
2247 		 * OK, we've just been woken up, so retrieve the next entry
2248 		 * from the head of the job queue for this local port.
2249 		 */
2250 		job = fctl_deque_job(port);
2251 
2252 		/*
2253 		 * Handle all the fp driver's supported job codes here
2254 		 * in this big honkin' switch.
2255 		 */
2256 		switch (job->job_code) {
2257 		case JOB_PORT_SHUTDOWN:
2258 			/*
2259 			 * fp_port_shutdown() is only called from here. This
2260 			 * will prepare the local port instance (softstate)
2261 			 * for detaching.  This cancels timeout callbacks,
2262 			 * executes LOGOs with remote ports, cleans up tables,
2263 			 * and deallocates data structs.
2264 			 */
2265 			fp_port_shutdown(port, job);
2266 
2267 			/*
2268 			 * This will exit the job thread.
2269 			 */
2270 #ifndef __lock_lint
2271 			CALLB_CPR_EXIT(&(port->fp_cpr_info));
2272 #else
2273 			mutex_exit(&port->fp_mutex);
2274 #endif
2275 			fctl_jobdone(job);
2276 			thread_exit();
2277 
2278 			/* NOTREACHED */
2279 
2280 		case JOB_ATTACH_ULP: {
2281 			/*
2282 			 * This job is spawned in response to a ULP calling
2283 			 * fc_ulp_add().
2284 			 */
2285 
2286 			boolean_t do_attach_ulps = B_TRUE;
2287 
2288 			/*
2289 			 * If fp is detaching, we don't want to call
2290 			 * fp_startup_done as this asynchronous
2291 			 * notification may interfere with the re-attach.
2292 			 */
2293 
2294 			if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2295 			    FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2296 				do_attach_ulps = B_FALSE;
2297 			} else {
2298 				/*
2299 				 * We are going to force the transport
2300 				 * to attach to the ULPs, so set
2301 				 * fp_ulp_attach.  This will keep any
2302 				 * potential detach from occurring until
2303 				 * we are done.
2304 				 */
2305 				port->fp_ulp_attach = 1;
2306 			}
2307 
2308 			mutex_exit(&port->fp_mutex);
2309 
2310 			/*
2311 			 * NOTE: Since we just dropped the mutex, there is now
2312 			 * a race window where the fp_soft_state check above
2313 			 * could change here.  This race is covered because an
2314 			 * additional check was added in the functions hidden
2315 			 * under fp_startup_done().
2316 			 */
2317 			if (do_attach_ulps == B_TRUE) {
2318 				/*
2319 				 * This goes thru a bit of a convoluted call
2320 				 * chain before spawning off a DDI taskq
2321 				 * request to perform the actual attach
2322 				 * operations. Blocking can occur at a number
2323 				 * of points.
2324 				 */
2325 				fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2326 			}
2327 			job->job_result = FC_SUCCESS;
2328 			fctl_jobdone(job);
2329 			break;
2330 		}
2331 
2332 		case JOB_ULP_NOTIFY: {
2333 			/*
2334 			 * Pass state change notifications up to any/all
2335 			 * registered ULPs.
2336 			 */
2337 			uint32_t statec;
2338 
2339 			statec = job->job_ulp_listlen;
2340 			if (statec == FC_STATE_RESET_REQUESTED) {
2341 				port->fp_last_task = port->fp_task;
2342 				port->fp_task = FP_TASK_OFFLINE;
2343 				fp_port_offline(port, 0);
2344 				port->fp_task = port->fp_last_task;
2345 				port->fp_last_task = FP_TASK_IDLE;
2346 			}
2347 
2348 			if (--port->fp_statec_busy == 0) {
2349 				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2350 			}
2351 
2352 			mutex_exit(&port->fp_mutex);
2353 
2354 			job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2355 			fctl_jobdone(job);
2356 			break;
2357 		}
2358 
2359 		case JOB_PLOGI_ONE:
2360 			/*
2361 			 * Issue a PLOGI to a single remote port. Multiple
2362 			 * PLOGIs to different remote ports may occur in
2363 			 * parallel.
2364 			 * This can create the fc_remote_port_t if it does not
2365 			 * already exist.
2366 			 */
2367 
2368 			mutex_exit(&port->fp_mutex);
2369 			d_id = (uint32_t *)job->job_private;
2370 			pd = fctl_get_remote_port_by_did(port, *d_id);
2371 
2372 			if (pd) {
2373 				mutex_enter(&pd->pd_mutex);
2374 				if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2375 					pd->pd_login_count++;
2376 					mutex_exit(&pd->pd_mutex);
2377 					job->job_result = FC_SUCCESS;
2378 					fctl_jobdone(job);
2379 					break;
2380 				}
2381 				mutex_exit(&pd->pd_mutex);
2382 			} else {
2383 				mutex_enter(&port->fp_mutex);
2384 				if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2385 					mutex_exit(&port->fp_mutex);
2386 					pd = fp_create_remote_port_by_ns(port,
2387 					    *d_id, KM_SLEEP);
2388 					if (pd == NULL) {
2389 						job->job_result = FC_FAILURE;
2390 						fctl_jobdone(job);
2391 						break;
2392 					}
2393 				} else {
2394 					mutex_exit(&port->fp_mutex);
2395 				}
2396 			}
2397 
2398 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2399 			job->job_counter = 1;
2400 
2401 			rval = fp_port_login(port, *d_id, job,
2402 			    FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2403 
2404 			if (rval != FC_SUCCESS) {
2405 				job->job_result = rval;
2406 				fctl_jobdone(job);
2407 			}
2408 			break;
2409 
2410 		case JOB_LOGO_ONE: {
2411 			/*
2412 			 * Issue a PLOGO to a single remote port. Multiple
2413 			 * PLOGOs to different remote ports may occur in
2414 			 * parallel.
2415 			 */
2416 			fc_remote_port_t *pd;
2417 
2418 #ifndef	__lock_lint
2419 			ASSERT(job->job_counter > 0);
2420 #endif
2421 
2422 			pd = (fc_remote_port_t *)job->job_ulp_pkts;
2423 
2424 			mutex_enter(&pd->pd_mutex);
2425 			if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2426 				mutex_exit(&pd->pd_mutex);
2427 				job->job_result = FC_LOGINREQ;
2428 				mutex_exit(&port->fp_mutex);
2429 				fctl_jobdone(job);
2430 				break;
2431 			}
2432 			if (pd->pd_login_count > 1) {
2433 				pd->pd_login_count--;
2434 				mutex_exit(&pd->pd_mutex);
2435 				job->job_result = FC_SUCCESS;
2436 				mutex_exit(&port->fp_mutex);
2437 				fctl_jobdone(job);
2438 				break;
2439 			}
2440 			mutex_exit(&pd->pd_mutex);
2441 			mutex_exit(&port->fp_mutex);
2442 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2443 			(void) fp_logout(port, pd, job);
2444 			break;
2445 		}
2446 
2447 		case JOB_FCIO_LOGIN:
2448 			/*
2449 			 * PLOGI initiated at ioctl request.
2450 			 */
2451 			mutex_exit(&port->fp_mutex);
2452 			job->job_result =
2453 			    fp_fcio_login(port, job->job_private, job);
2454 			fctl_jobdone(job);
2455 			break;
2456 
2457 		case JOB_FCIO_LOGOUT:
2458 			/*
2459 			 * PLOGO initiated at ioctl request.
2460 			 */
2461 			mutex_exit(&port->fp_mutex);
2462 			job->job_result =
2463 			    fp_fcio_logout(port, job->job_private, job);
2464 			fctl_jobdone(job);
2465 			break;
2466 
2467 		case JOB_PORT_GETMAP:
2468 		case JOB_PORT_GETMAP_PLOGI_ALL: {
2469 			port->fp_last_task = port->fp_task;
2470 			port->fp_task = FP_TASK_GETMAP;
2471 
2472 			switch (port->fp_topology) {
2473 			case FC_TOP_PRIVATE_LOOP:
2474 				job->job_counter = 1;
2475 
2476 				fp_get_loopmap(port, job);
2477 				mutex_exit(&port->fp_mutex);
2478 				fp_jobwait(job);
2479 				fctl_fillout_map(port,
2480 				    (fc_portmap_t **)job->job_private,
2481 				    (uint32_t *)job->job_arg, 1, 0, 0);
2482 				fctl_jobdone(job);
2483 				mutex_enter(&port->fp_mutex);
2484 				break;
2485 
2486 			case FC_TOP_PUBLIC_LOOP:
2487 			case FC_TOP_FABRIC:
2488 				mutex_exit(&port->fp_mutex);
2489 				job->job_counter = 1;
2490 
2491 				job->job_result = fp_ns_getmap(port,
2492 				    job, (fc_portmap_t **)job->job_private,
2493 				    (uint32_t *)job->job_arg,
2494 				    FCTL_GAN_START_ID);
2495 				fctl_jobdone(job);
2496 				mutex_enter(&port->fp_mutex);
2497 				break;
2498 
2499 			case FC_TOP_PT_PT:
2500 				mutex_exit(&port->fp_mutex);
2501 				fctl_fillout_map(port,
2502 				    (fc_portmap_t **)job->job_private,
2503 				    (uint32_t *)job->job_arg, 1, 0, 0);
2504 				fctl_jobdone(job);
2505 				mutex_enter(&port->fp_mutex);
2506 				break;
2507 
2508 			default:
2509 				mutex_exit(&port->fp_mutex);
2510 				fctl_jobdone(job);
2511 				mutex_enter(&port->fp_mutex);
2512 				break;
2513 			}
2514 			port->fp_task = port->fp_last_task;
2515 			port->fp_last_task = FP_TASK_IDLE;
2516 			mutex_exit(&port->fp_mutex);
2517 			break;
2518 		}
2519 
2520 		case JOB_PORT_OFFLINE: {
2521 			fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2522 
2523 			port->fp_last_task = port->fp_task;
2524 			port->fp_task = FP_TASK_OFFLINE;
2525 
2526 			if (port->fp_statec_busy > 2) {
2527 				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2528 				fp_port_offline(port, 0);
2529 				if (--port->fp_statec_busy == 0) {
2530 					port->fp_soft_state &=
2531 					    ~FP_SOFT_IN_STATEC_CB;
2532 				}
2533 			} else {
2534 				fp_port_offline(port, 1);
2535 			}
2536 
2537 			port->fp_task = port->fp_last_task;
2538 			port->fp_last_task = FP_TASK_IDLE;
2539 
2540 			mutex_exit(&port->fp_mutex);
2541 
2542 			fctl_jobdone(job);
2543 			break;
2544 		}
2545 
2546 		case JOB_PORT_STARTUP: {
2547 			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2548 				if (port->fp_statec_busy > 1) {
2549 					mutex_exit(&port->fp_mutex);
2550 					break;
2551 				}
2552 				mutex_exit(&port->fp_mutex);
2553 
2554 				FP_TRACE(FP_NHEAD2(9, rval),
2555 				    "Topology discovery failed");
2556 				break;
2557 			}
2558 
2559 			/*
2560 			 * Attempt building device handles in case
2561 			 * of private Loop.
2562 			 */
2563 			if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2564 				job->job_counter = 1;
2565 
2566 				fp_get_loopmap(port, job);
2567 				mutex_exit(&port->fp_mutex);
2568 				fp_jobwait(job);
2569 				mutex_enter(&port->fp_mutex);
2570 				if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2571 					ASSERT(port->fp_total_devices == 0);
2572 					port->fp_total_devices =
2573 					    port->fp_dev_count;
2574 				}
2575 			} else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2576 				/*
2577 				 * Hack to avoid state changes going up early
2578 				 */
2579 				port->fp_statec_busy++;
2580 				port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2581 
2582 				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2583 				fp_fabric_online(port, job);
2584 				job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2585 			}
2586 			mutex_exit(&port->fp_mutex);
2587 			fctl_jobdone(job);
2588 			break;
2589 		}
2590 
2591 		case JOB_PORT_ONLINE: {
2592 			char		*newtop;
2593 			char		*oldtop;
2594 			uint32_t	old_top;
2595 
2596 			fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2597 
2598 			/*
2599 			 * Bail out early if there are a lot of
2600 			 * state changes in the pipeline
2601 			 */
2602 			if (port->fp_statec_busy > 1) {
2603 				--port->fp_statec_busy;
2604 				mutex_exit(&port->fp_mutex);
2605 				fctl_jobdone(job);
2606 				break;
2607 			}
2608 
2609 			switch (old_top = port->fp_topology) {
2610 			case FC_TOP_PRIVATE_LOOP:
2611 				oldtop = "Private Loop";
2612 				break;
2613 
2614 			case FC_TOP_PUBLIC_LOOP:
2615 				oldtop = "Public Loop";
2616 				break;
2617 
2618 			case FC_TOP_PT_PT:
2619 				oldtop = "Point to Point";
2620 				break;
2621 
2622 			case FC_TOP_FABRIC:
2623 				oldtop = "Fabric";
2624 				break;
2625 
2626 			default:
2627 				oldtop = NULL;
2628 				break;
2629 			}
2630 
2631 			port->fp_last_task = port->fp_task;
2632 			port->fp_task = FP_TASK_ONLINE;
2633 
2634 			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2635 
2636 				port->fp_task = port->fp_last_task;
2637 				port->fp_last_task = FP_TASK_IDLE;
2638 
2639 				if (port->fp_statec_busy > 1) {
2640 					--port->fp_statec_busy;
2641 					mutex_exit(&port->fp_mutex);
2642 					break;
2643 				}
2644 
2645 				port->fp_state = FC_STATE_OFFLINE;
2646 
2647 				FP_TRACE(FP_NHEAD2(9, rval),
2648 				    "Topology discovery failed");
2649 
2650 				if (--port->fp_statec_busy == 0) {
2651 					port->fp_soft_state &=
2652 					    ~FP_SOFT_IN_STATEC_CB;
2653 				}
2654 
2655 				if (port->fp_offline_tid == NULL) {
2656 					port->fp_offline_tid =
2657 					    timeout(fp_offline_timeout,
2658 					    (caddr_t)port, fp_offline_ticks);
2659 				}
2660 
2661 				mutex_exit(&port->fp_mutex);
2662 				break;
2663 			}
2664 
2665 			switch (port->fp_topology) {
2666 			case FC_TOP_PRIVATE_LOOP:
2667 				newtop = "Private Loop";
2668 				break;
2669 
2670 			case FC_TOP_PUBLIC_LOOP:
2671 				newtop = "Public Loop";
2672 				break;
2673 
2674 			case FC_TOP_PT_PT:
2675 				newtop = "Point to Point";
2676 				break;
2677 
2678 			case FC_TOP_FABRIC:
2679 				newtop = "Fabric";
2680 				break;
2681 
2682 			default:
2683 				newtop = NULL;
2684 				break;
2685 			}
2686 
2687 			if (oldtop && newtop && strcmp(oldtop, newtop)) {
2688 				fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2689 				    "Change in FC Topology old = %s new = %s",
2690 				    oldtop, newtop);
2691 			}
2692 
2693 			switch (port->fp_topology) {
2694 			case FC_TOP_PRIVATE_LOOP: {
2695 				int orphan = (old_top == FC_TOP_FABRIC ||
2696 				    old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2697 
2698 				mutex_exit(&port->fp_mutex);
2699 				fp_loop_online(port, job, orphan);
2700 				break;
2701 			}
2702 
2703 			case FC_TOP_PUBLIC_LOOP:
2704 				/* FALLTHROUGH */
2705 			case FC_TOP_FABRIC:
2706 				fp_fabric_online(port, job);
2707 				mutex_exit(&port->fp_mutex);
2708 				break;
2709 
2710 			case FC_TOP_PT_PT:
2711 				fp_p2p_online(port, job);
2712 				mutex_exit(&port->fp_mutex);
2713 				break;
2714 
2715 			default:
2716 				if (--port->fp_statec_busy != 0) {
2717 					/*
2718 					 * Watch curiously at what the next
2719 					 * state transition can do.
2720 					 */
2721 					mutex_exit(&port->fp_mutex);
2722 					break;
2723 				}
2724 
2725 				FP_TRACE(FP_NHEAD2(9, 0),
2726 				    "Topology Unknown, Offlining the port..");
2727 
2728 				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2729 				port->fp_state = FC_STATE_OFFLINE;
2730 
2731 				if (port->fp_offline_tid == NULL) {
2732 					port->fp_offline_tid =
2733 					    timeout(fp_offline_timeout,
2734 					    (caddr_t)port, fp_offline_ticks);
2735 				}
2736 				mutex_exit(&port->fp_mutex);
2737 				break;
2738 			}
2739 
2740 			mutex_enter(&port->fp_mutex);
2741 
2742 			port->fp_task = port->fp_last_task;
2743 			port->fp_last_task = FP_TASK_IDLE;
2744 
2745 			mutex_exit(&port->fp_mutex);
2746 
2747 			fctl_jobdone(job);
2748 			break;
2749 		}
2750 
2751 		case JOB_PLOGI_GROUP: {
2752 			mutex_exit(&port->fp_mutex);
2753 			fp_plogi_group(port, job);
2754 			break;
2755 		}
2756 
2757 		case JOB_UNSOL_REQUEST: {
2758 			mutex_exit(&port->fp_mutex);
2759 			fp_handle_unsol_buf(port,
2760 			    (fc_unsol_buf_t *)job->job_private, job);
2761 			fctl_dealloc_job(job);
2762 			break;
2763 		}
2764 
2765 		case JOB_NS_CMD: {
2766 			fctl_ns_req_t *ns_cmd;
2767 
2768 			mutex_exit(&port->fp_mutex);
2769 
2770 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2771 			ns_cmd = (fctl_ns_req_t *)job->job_private;
2772 			if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2773 			    ns_cmd->ns_cmd_code > NS_DA_ID) {
2774 				job->job_result = FC_BADCMD;
2775 				fctl_jobdone(job);
2776 				break;
2777 			}
2778 
2779 			if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2780 				if (ns_cmd->ns_pd != NULL) {
2781 					job->job_result = FC_BADOBJECT;
2782 					fctl_jobdone(job);
2783 					break;
2784 				}
2785 
2786 				job->job_counter = 1;
2787 
2788 				rval = fp_ns_reg(port, ns_cmd->ns_pd,
2789 				    ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2790 
2791 				if (rval != FC_SUCCESS) {
2792 					job->job_result = rval;
2793 					fctl_jobdone(job);
2794 				}
2795 				break;
2796 			}
2797 			job->job_result = FC_SUCCESS;
2798 			job->job_counter = 1;
2799 
2800 			rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2801 			if (rval != FC_SUCCESS) {
2802 				fctl_jobdone(job);
2803 			}
2804 			break;
2805 		}
2806 
2807 		case JOB_LINK_RESET: {
2808 			la_wwn_t *pwwn;
2809 			uint32_t topology;
2810 
2811 			pwwn = (la_wwn_t *)job->job_private;
2812 			ASSERT(pwwn != NULL);
2813 
2814 			topology = port->fp_topology;
2815 			mutex_exit(&port->fp_mutex);
2816 
2817 			if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2818 			    topology == FC_TOP_PRIVATE_LOOP) {
2819 				job->job_flags |= JOB_TYPE_FP_ASYNC;
2820 				rval = port->fp_fca_tran->fca_reset(
2821 				    port->fp_fca_handle, FC_FCA_LINK_RESET);
2822 				job->job_result = rval;
2823 				fp_jobdone(job);
2824 			} else {
2825 				ASSERT((job->job_flags &
2826 				    JOB_TYPE_FP_ASYNC) == 0);
2827 
2828 				if (FC_IS_TOP_SWITCH(topology)) {
2829 					rval = fp_remote_lip(port, pwwn,
2830 					    KM_SLEEP, job);
2831 				} else {
2832 					rval = FC_FAILURE;
2833 				}
2834 				if (rval != FC_SUCCESS) {
2835 					job->job_result = rval;
2836 				}
2837 				fctl_jobdone(job);
2838 			}
2839 			break;
2840 		}
2841 
2842 		default:
2843 			mutex_exit(&port->fp_mutex);
2844 			job->job_result = FC_BADCMD;
2845 			fctl_jobdone(job);
2846 			break;
2847 		}
2848 	}
2849 	/* NOTREACHED */
2850 }
2851 
2852 
2853 /*
2854  * Perform FC port bring up initialization
2855  */
2856 static int
2857 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2858 {
2859 	int		rval;
2860 	uint32_t	state;
2861 	uint32_t	src_id;
2862 	fc_lilpmap_t	*lilp_map;
2863 
2864 	ASSERT(MUTEX_HELD(&port->fp_mutex));
2865 	ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2866 
2867 	FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2868 	    " port=%p, job=%p", port, job);
2869 
2870 	port->fp_topology = FC_TOP_UNKNOWN;
2871 	port->fp_port_id.port_id = 0;
2872 	state = FC_PORT_STATE_MASK(port->fp_state);
2873 
2874 	if (state == FC_STATE_OFFLINE) {
2875 		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2876 		job->job_result = FC_OFFLINE;
2877 		mutex_exit(&port->fp_mutex);
2878 		fctl_jobdone(job);
2879 		mutex_enter(&port->fp_mutex);
2880 		return (FC_OFFLINE);
2881 	}
2882 
2883 	if (state == FC_STATE_LOOP) {
2884 		port->fp_port_type.port_type = FC_NS_PORT_NL;
2885 		mutex_exit(&port->fp_mutex);
2886 
2887 		lilp_map = &port->fp_lilp_map;
2888 		if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2889 			job->job_result = FC_FAILURE;
2890 			fctl_jobdone(job);
2891 
2892 			FP_TRACE(FP_NHEAD1(9, rval),
2893 			    "LILP map Invalid or not present");
2894 			mutex_enter(&port->fp_mutex);
2895 			return (FC_FAILURE);
2896 		}
2897 
2898 		if (lilp_map->lilp_length == 0) {
2899 			job->job_result = FC_NO_MAP;
2900 			fctl_jobdone(job);
2901 			fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2902 			    "LILP map length zero");
2903 			mutex_enter(&port->fp_mutex);
2904 			return (FC_NO_MAP);
2905 		}
2906 		src_id = lilp_map->lilp_myalpa & 0xFF;
2907 	} else {
2908 		fc_remote_port_t	*pd;
2909 		fc_fca_pm_t		pm;
2910 		fc_fca_p2p_info_t	p2p_info;
2911 		int			pd_recepient;
2912 
2913 		/*
2914 		 * Get P2P remote port info if possible
2915 		 */
2916 		bzero((caddr_t)&pm, sizeof (pm));
2917 
2918 		pm.pm_cmd_flags = FC_FCA_PM_READ;
2919 		pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2920 		pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2921 		pm.pm_data_buf = (caddr_t)&p2p_info;
2922 
2923 		rval = port->fp_fca_tran->fca_port_manage(
2924 		    port->fp_fca_handle, &pm);
2925 
2926 		if (rval == FC_SUCCESS) {
2927 			port->fp_port_id.port_id = p2p_info.fca_d_id;
2928 			port->fp_port_type.port_type = FC_NS_PORT_N;
2929 			port->fp_topology = FC_TOP_PT_PT;
2930 			port->fp_total_devices = 1;
2931 			pd_recepient = fctl_wwn_cmp(
2932 			    &port->fp_service_params.nport_ww_name,
2933 			    &p2p_info.pwwn) < 0 ?
2934 			    PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2935 			mutex_exit(&port->fp_mutex);
2936 			pd = fctl_create_remote_port(port,
2937 			    &p2p_info.nwwn,
2938 			    &p2p_info.pwwn,
2939 			    p2p_info.d_id,
2940 			    pd_recepient, KM_NOSLEEP);
2941 			FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2942 			    " P2P port=%p pd=%p", port, pd);
2943 			mutex_enter(&port->fp_mutex);
2944 			return (FC_SUCCESS);
2945 		}
2946 		port->fp_port_type.port_type = FC_NS_PORT_N;
2947 		mutex_exit(&port->fp_mutex);
2948 		src_id = 0;
2949 	}
2950 
2951 	job->job_counter = 1;
2952 	job->job_result = FC_SUCCESS;
2953 
2954 	if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2955 	    KM_SLEEP)) != FC_SUCCESS) {
2956 		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2957 		job->job_result = FC_FAILURE;
2958 		fctl_jobdone(job);
2959 
2960 		mutex_enter(&port->fp_mutex);
2961 		if (port->fp_statec_busy <= 1) {
2962 			mutex_exit(&port->fp_mutex);
2963 			fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
2964 			    "Couldn't transport FLOGI");
2965 			mutex_enter(&port->fp_mutex);
2966 		}
2967 		return (FC_FAILURE);
2968 	}
2969 
2970 	fp_jobwait(job);
2971 
2972 	mutex_enter(&port->fp_mutex);
2973 	if (job->job_result == FC_SUCCESS) {
2974 		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2975 			mutex_exit(&port->fp_mutex);
2976 			fp_ns_init(port, job, KM_SLEEP);
2977 			mutex_enter(&port->fp_mutex);
2978 		}
2979 	} else {
2980 		if (state == FC_STATE_LOOP) {
2981 			port->fp_topology = FC_TOP_PRIVATE_LOOP;
2982 			port->fp_port_id.port_id =
2983 			    port->fp_lilp_map.lilp_myalpa & 0xFF;
2984 		}
2985 	}
2986 
2987 	FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
2988 	    port, job);
2989 
2990 	return (FC_SUCCESS);
2991 }
2992 
2993 
2994 /*
2995  * Perform ULP invocations following FC port startup
2996  */
2997 /* ARGSUSED */
2998 static void
2999 fp_startup_done(opaque_t arg, uchar_t result)
3000 {
3001 	fc_local_port_t *port = arg;
3002 
3003 	fp_attach_ulps(port, FC_CMD_ATTACH);
3004 
3005 	FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3006 }
3007 
3008 
3009 /*
3010  * Perform ULP port attach
3011  */
3012 static void
3013 fp_ulp_port_attach(void *arg)
3014 {
3015 	fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3016 	fc_local_port_t	 *port = att->att_port;
3017 
3018 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3019 	    " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3020 
3021 	fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3022 
3023 	if (att->att_need_pm_idle == B_TRUE) {
3024 		fctl_idle_port(port);
3025 	}
3026 
3027 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3028 	    " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3029 
3030 	mutex_enter(&att->att_port->fp_mutex);
3031 	att->att_port->fp_ulp_attach = 0;
3032 
3033 	port->fp_task = port->fp_last_task;
3034 	port->fp_last_task = FP_TASK_IDLE;
3035 
3036 	cv_signal(&att->att_port->fp_attach_cv);
3037 
3038 	mutex_exit(&att->att_port->fp_mutex);
3039 
3040 	kmem_free(att, sizeof (fp_soft_attach_t));
3041 }
3042 
3043 /*
3044  * Entry point to funnel all requests down to FCAs
3045  */
3046 static int
3047 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3048 {
3049 	int rval;
3050 
3051 	mutex_enter(&port->fp_mutex);
3052 	if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3053 	    (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3054 	    FC_STATE_OFFLINE))) {
3055 		/*
3056 		 * This means there is more than one state change
3057 		 * at this point of time - Since they are processed
3058 		 * serially, any processing of the current one should
3059 		 * be failed, failed and move up in processing the next
3060 		 */
3061 		cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3062 		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3063 		if (cmd->cmd_job) {
3064 			/*
3065 			 * A state change that is going to be invalidated
3066 			 * by another one already in the port driver's queue
3067 			 * need not go up to all ULPs. This will minimize
3068 			 * needless processing and ripples in ULP modules
3069 			 */
3070 			cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3071 		}
3072 		mutex_exit(&port->fp_mutex);
3073 		return (FC_STATEC_BUSY);
3074 	}
3075 
3076 	if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3077 		cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3078 		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3079 		mutex_exit(&port->fp_mutex);
3080 
3081 		return (FC_OFFLINE);
3082 	}
3083 	mutex_exit(&port->fp_mutex);
3084 
3085 	rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3086 	if (rval != FC_SUCCESS) {
3087 		if (rval == FC_TRAN_BUSY) {
3088 			cmd->cmd_retry_interval = fp_retry_delay;
3089 			rval = fp_retry_cmd(&cmd->cmd_pkt);
3090 			if (rval == FC_FAILURE) {
3091 				cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3092 			}
3093 		}
3094 	} else {
3095 		mutex_enter(&port->fp_mutex);
3096 		port->fp_out_fpcmds++;
3097 		mutex_exit(&port->fp_mutex);
3098 	}
3099 
3100 	return (rval);
3101 }
3102 
3103 
3104 /*
3105  * Each time a timeout kicks in, walk the wait queue, decrement the
3106  * the retry_interval, when the retry_interval becomes less than
3107  * or equal to zero, re-transport the command: If the re-transport
3108  * fails with BUSY, enqueue the command in the wait queue.
3109  *
3110  * In order to prevent looping forever because of commands enqueued
3111  * from within this function itself, save the current tail pointer
3112  * (in cur_tail) and exit the loop after serving this command.
3113  */
3114 static void
3115 fp_resendcmd(void *port_handle)
3116 {
3117 	int		rval;
3118 	fc_local_port_t	*port;
3119 	fp_cmd_t	*cmd;
3120 	fp_cmd_t	*cur_tail;
3121 
3122 	port = port_handle;
3123 	mutex_enter(&port->fp_mutex);
3124 	cur_tail = port->fp_wait_tail;
3125 	mutex_exit(&port->fp_mutex);
3126 
3127 	while ((cmd = fp_deque_cmd(port)) != NULL) {
3128 		cmd->cmd_retry_interval -= fp_retry_ticker;
3129 		/* Check if we are detaching */
3130 		if (port->fp_soft_state &
3131 		    (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3132 			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3133 			cmd->cmd_pkt.pkt_reason = 0;
3134 			fp_iodone(cmd);
3135 		} else if (cmd->cmd_retry_interval <= 0) {
3136 			rval = cmd->cmd_transport(port->fp_fca_handle,
3137 			    &cmd->cmd_pkt);
3138 
3139 			if (rval != FC_SUCCESS) {
3140 				if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3141 					if (--cmd->cmd_retry_count) {
3142 						fp_enque_cmd(port, cmd);
3143 						if (cmd == cur_tail) {
3144 							break;
3145 						}
3146 						continue;
3147 					}
3148 					cmd->cmd_pkt.pkt_state =
3149 					    FC_PKT_TRAN_BSY;
3150 				} else {
3151 					cmd->cmd_pkt.pkt_state =
3152 					    FC_PKT_TRAN_ERROR;
3153 				}
3154 				cmd->cmd_pkt.pkt_reason = 0;
3155 				fp_iodone(cmd);
3156 			} else {
3157 				mutex_enter(&port->fp_mutex);
3158 				port->fp_out_fpcmds++;
3159 				mutex_exit(&port->fp_mutex);
3160 			}
3161 		} else {
3162 			fp_enque_cmd(port, cmd);
3163 		}
3164 
3165 		if (cmd == cur_tail) {
3166 			break;
3167 		}
3168 	}
3169 
3170 	mutex_enter(&port->fp_mutex);
3171 	if (port->fp_wait_head) {
3172 		timeout_id_t tid;
3173 
3174 		mutex_exit(&port->fp_mutex);
3175 		tid = timeout(fp_resendcmd, (caddr_t)port,
3176 		    fp_retry_ticks);
3177 		mutex_enter(&port->fp_mutex);
3178 		port->fp_wait_tid = tid;
3179 	} else {
3180 		port->fp_wait_tid = NULL;
3181 	}
3182 	mutex_exit(&port->fp_mutex);
3183 }
3184 
3185 
3186 /*
3187  * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3188  *
3189  * Yes, as you can see below, cmd_retry_count is used here too.	 That means
3190  * the retries for BUSY are less if there were transport failures (transport
3191  * failure means fca_transport failure). The goal is not to exceed overall
3192  * retries set in the cmd_retry_count (whatever may be the reason for retry)
3193  *
3194  * Return Values:
3195  *	FC_SUCCESS
3196  *	FC_FAILURE
3197  */
3198 static int
3199 fp_retry_cmd(fc_packet_t *pkt)
3200 {
3201 	fp_cmd_t *cmd;
3202 
3203 	cmd = pkt->pkt_ulp_private;
3204 
3205 	if (--cmd->cmd_retry_count) {
3206 		fp_enque_cmd(cmd->cmd_port, cmd);
3207 		return (FC_SUCCESS);
3208 	} else {
3209 		return (FC_FAILURE);
3210 	}
3211 }
3212 
3213 
/*
 * Queue up FC packet for deferred retry
 *
 * Appends the command to the tail of the port's wait queue.  If the
 * queue was empty and no retry callout is recorded in fp_wait_tid,
 * arms a timeout so fp_resendcmd() runs after fp_retry_ticks.
 */
static void
fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
{
	timeout_id_t tid;

	/* Caller must not hold the port mutex; it is taken below */
	ASSERT(!MUTEX_HELD(&port->fp_mutex));

#ifdef	DEBUG
	fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
	    "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
#endif

	mutex_enter(&port->fp_mutex);
	if (port->fp_wait_tail) {
		/* Non-empty queue: just link onto the tail */
		port->fp_wait_tail->cmd_next = cmd;
		port->fp_wait_tail = cmd;
	} else {
		ASSERT(port->fp_wait_head == NULL);
		port->fp_wait_head = port->fp_wait_tail = cmd;
		if (port->fp_wait_tid == NULL) {
			/*
			 * The mutex is dropped across timeout() —
			 * presumably so the callout is not armed while
			 * holding fp_mutex — and the id is stored only
			 * after re-acquiring it, for later untimeout().
			 * NOTE(review): fp_resendcmd may run before
			 * fp_wait_tid is set; confirm the consumer
			 * (fp_deque_cmd) tolerates that window.
			 */
			mutex_exit(&port->fp_mutex);
			tid = timeout(fp_resendcmd, (caddr_t)port,
			    fp_retry_ticks);
			mutex_enter(&port->fp_mutex);
			port->fp_wait_tid = tid;
		}
	}
	mutex_exit(&port->fp_mutex);
}
3246 
3247 
3248 /*
3249  * Handle all RJT codes
3250  */
3251 static int
3252 fp_handle_reject(fc_packet_t *pkt)
3253 {
3254 	int		rval = FC_FAILURE;
3255 	uchar_t		next_class;
3256 	fp_cmd_t	*cmd;
3257 	fc_local_port_t *port;
3258 
3259 	cmd = pkt->pkt_ulp_private;
3260 	port = cmd->cmd_port;
3261 
3262 	switch (pkt->pkt_state) {
3263 	case FC_PKT_FABRIC_RJT:
3264 	case FC_PKT_NPORT_RJT:
3265 		if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
3266 			next_class = fp_get_nextclass(cmd->cmd_port,
3267 			    FC_TRAN_CLASS(pkt->pkt_tran_flags));
3268 
3269 			if (next_class == FC_TRAN_CLASS_INVALID) {
3270 				return (rval);
3271 			}
3272 			pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3273 			pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3274 
3275 			rval = fp_sendcmd(cmd->cmd_port, cmd,
3276 			    cmd->cmd_port->fp_fca_handle);
3277 
3278 			if (rval != FC_SUCCESS) {
3279 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
3280 			}
3281 		}
3282 		break;
3283 
3284 	case FC_PKT_LS_RJT:
3285 	case FC_PKT_BA_RJT:
3286 		if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
3287 		    (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
3288 			cmd->cmd_retry_interval = fp_retry_delay;
3289 			rval = fp_retry_cmd(pkt);
3290 		}
3291 		break;
3292 
3293 	case FC_PKT_FS_RJT:
3294 		if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) {
3295 			cmd->cmd_retry_interval = fp_retry_delay;
3296 			rval = fp_retry_cmd(pkt);
3297 		}
3298 		break;
3299 
3300 	case FC_PKT_LOCAL_RJT:
3301 		if (pkt->pkt_reason == FC_REASON_QFULL) {
3302 			cmd->cmd_retry_interval = fp_retry_delay;
3303 			rval = fp_retry_cmd(pkt);
3304 		}
3305 		break;
3306 
3307 	default:
3308 		FP_TRACE(FP_NHEAD1(1, 0),
3309 		    "fp_handle_reject(): Invalid pkt_state");
3310 		break;
3311 	}
3312 
3313 	return (rval);
3314 }
3315 
3316 
3317 /*
3318  * Return the next class of service supported by the FCA
3319  */
3320 static uchar_t
3321 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3322 {
3323 	uchar_t next_class;
3324 
3325 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3326 
3327 	switch (cur_class) {
3328 	case FC_TRAN_CLASS_INVALID:
3329 		if (port->fp_cos & FC_NS_CLASS1) {
3330 			next_class = FC_TRAN_CLASS1;
3331 			break;
3332 		}
3333 		/* FALLTHROUGH */
3334 
3335 	case FC_TRAN_CLASS1:
3336 		if (port->fp_cos & FC_NS_CLASS2) {
3337 			next_class = FC_TRAN_CLASS2;
3338 			break;
3339 		}
3340 		/* FALLTHROUGH */
3341 
3342 	case FC_TRAN_CLASS2:
3343 		if (port->fp_cos & FC_NS_CLASS3) {
3344 			next_class = FC_TRAN_CLASS3;
3345 			break;
3346 		}
3347 		/* FALLTHROUGH */
3348 
3349 	case FC_TRAN_CLASS3:
3350 	default:
3351 		next_class = FC_TRAN_CLASS_INVALID;
3352 		break;
3353 	}
3354 
3355 	return (next_class);
3356 }
3357 
3358 
3359 /*
3360  * Determine if a class of service is supported by the FCA
3361  */
3362 static int
3363 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3364 {
3365 	int rval;
3366 
3367 	switch (tran_class) {
3368 	case FC_TRAN_CLASS1:
3369 		if (cos & FC_NS_CLASS1) {
3370 			rval = FC_SUCCESS;
3371 		} else {
3372 			rval = FC_FAILURE;
3373 		}
3374 		break;
3375 
3376 	case FC_TRAN_CLASS2:
3377 		if (cos & FC_NS_CLASS2) {
3378 			rval = FC_SUCCESS;
3379 		} else {
3380 			rval = FC_FAILURE;
3381 		}
3382 		break;
3383 
3384 	case FC_TRAN_CLASS3:
3385 		if (cos & FC_NS_CLASS3) {
3386 			rval = FC_SUCCESS;
3387 		} else {
3388 			rval = FC_FAILURE;
3389 		}
3390 		break;
3391 
3392 	default:
3393 		rval = FC_FAILURE;
3394 		break;
3395 	}
3396 
3397 	return (rval);
3398 }
3399 
3400 
3401 /*
3402  * Dequeue FC packet for retry
3403  */
3404 static fp_cmd_t *
3405 fp_deque_cmd(fc_local_port_t *port)
3406 {
3407 	fp_cmd_t *cmd;
3408 
3409 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3410 
3411 	mutex_enter(&port->fp_mutex);
3412 
3413 	if (port->fp_wait_head == NULL) {
3414 		/*
3415 		 * To avoid races, NULL the fp_wait_tid as
3416 		 * we are about to exit the timeout thread.
3417 		 */
3418 		port->fp_wait_tid = NULL;
3419 		mutex_exit(&port->fp_mutex);
3420 		return (NULL);
3421 	}
3422 
3423 	cmd = port->fp_wait_head;
3424 	port->fp_wait_head = cmd->cmd_next;
3425 	cmd->cmd_next = NULL;
3426 
3427 	if (port->fp_wait_head == NULL) {
3428 		port->fp_wait_tail = NULL;
3429 	}
3430 	mutex_exit(&port->fp_mutex);
3431 
3432 	return (cmd);
3433 }
3434 
3435 
/*
 * Wait for job completion
 *
 * Blocks the caller on the job's port semaphore until the completion
 * path (fp_jobdone() on the last outstanding sub-command) does the
 * matching sema_v().
 */
static void
fp_jobwait(job_request_t *job)
{
	sema_p(&job->job_port_sema);
}
3444 
3445 
3446 /*
3447  * Convert FC packet state to FC errno
3448  */
3449 int
3450 fp_state_to_rval(uchar_t state)
3451 {
3452 	int count;
3453 
3454 	for (count = 0; count < sizeof (fp_xlat) /
3455 	    sizeof (fp_xlat[0]); count++) {
3456 		if (fp_xlat[count].xlat_state == state) {
3457 			return (fp_xlat[count].xlat_rval);
3458 		}
3459 	}
3460 
3461 	return (FC_FAILURE);
3462 }
3463 
3464 
3465 /*
3466  * For Synchronous I/O requests, the caller is
3467  * expected to do fctl_jobdone(if necessary)
3468  *
3469  * We want to preserve at least one failure in the
3470  * job_result if it happens.
3471  *
3472  */
3473 static void
3474 fp_iodone(fp_cmd_t *cmd)
3475 {
3476 	fc_packet_t		*ulp_pkt = cmd->cmd_ulp_pkt;
3477 	job_request_t		*job = cmd->cmd_job;
3478 	fc_remote_port_t	*pd = cmd->cmd_pkt.pkt_pd;
3479 
3480 	ASSERT(job != NULL);
3481 	ASSERT(cmd->cmd_port != NULL);
3482 	ASSERT(&cmd->cmd_pkt != NULL);
3483 
3484 	mutex_enter(&job->job_mutex);
3485 	if (job->job_result == FC_SUCCESS) {
3486 		job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3487 	}
3488 	mutex_exit(&job->job_mutex);
3489 
3490 	if (pd) {
3491 		mutex_enter(&pd->pd_mutex);
3492 		pd->pd_flags = PD_IDLE;
3493 		mutex_exit(&pd->pd_mutex);
3494 	}
3495 
3496 	if (ulp_pkt) {
3497 		if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3498 		    FP_IS_PKT_ERROR(ulp_pkt)) {
3499 			fc_local_port_t		*port;
3500 			fc_remote_node_t	*node;
3501 
3502 			port = cmd->cmd_port;
3503 
3504 			mutex_enter(&pd->pd_mutex);
3505 			pd->pd_state = PORT_DEVICE_INVALID;
3506 			pd->pd_ref_count--;
3507 			node = pd->pd_remote_nodep;
3508 			mutex_exit(&pd->pd_mutex);
3509 
3510 			ASSERT(node != NULL);
3511 			ASSERT(port != NULL);
3512 
3513 			if (fctl_destroy_remote_port(port, pd) == 0) {
3514 				fctl_destroy_remote_node(node);
3515 			}
3516 
3517 			ulp_pkt->pkt_pd = NULL;
3518 		}
3519 
3520 		ulp_pkt->pkt_comp(ulp_pkt);
3521 	}
3522 
3523 	fp_free_pkt(cmd);
3524 	fp_jobdone(job);
3525 }
3526 
3527 
3528 /*
3529  * Job completion handler
3530  */
3531 static void
3532 fp_jobdone(job_request_t *job)
3533 {
3534 	mutex_enter(&job->job_mutex);
3535 	ASSERT(job->job_counter > 0);
3536 
3537 	if (--job->job_counter != 0) {
3538 		mutex_exit(&job->job_mutex);
3539 		return;
3540 	}
3541 
3542 	if (job->job_ulp_pkts) {
3543 		ASSERT(job->job_ulp_listlen > 0);
3544 		kmem_free(job->job_ulp_pkts,
3545 		    sizeof (fc_packet_t *) * job->job_ulp_listlen);
3546 	}
3547 
3548 	if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3549 		mutex_exit(&job->job_mutex);
3550 		fctl_jobdone(job);
3551 	} else {
3552 		mutex_exit(&job->job_mutex);
3553 		sema_v(&job->job_port_sema);
3554 	}
3555 }
3556 
3557 
3558 /*
3559  * Try to perform shutdown of a port during a detach. No return
3560  * value since the detach should not fail because the port shutdown
3561  * failed.
3562  */
3563 static void
3564 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3565 {
3566 	int			index;
3567 	int			count;
3568 	int			flags;
3569 	fp_cmd_t		*cmd;
3570 	struct pwwn_hash	*head;
3571 	fc_remote_port_t	*pd;
3572 
3573 	ASSERT(MUTEX_HELD(&port->fp_mutex));
3574 
3575 	job->job_result = FC_SUCCESS;
3576 
3577 	if (port->fp_taskq) {
3578 		/*
3579 		 * We must release the mutex here to ensure that other
3580 		 * potential jobs can complete their processing.  Many
3581 		 * also need this mutex.
3582 		 */
3583 		mutex_exit(&port->fp_mutex);
3584 		taskq_wait(port->fp_taskq);
3585 		mutex_enter(&port->fp_mutex);
3586 	}
3587 
3588 	if (port->fp_offline_tid) {
3589 		timeout_id_t tid;
3590 
3591 		tid = port->fp_offline_tid;
3592 		port->fp_offline_tid = NULL;
3593 		mutex_exit(&port->fp_mutex);
3594 		(void) untimeout(tid);
3595 		mutex_enter(&port->fp_mutex);
3596 	}
3597 
3598 	if (port->fp_wait_tid) {
3599 		timeout_id_t tid;
3600 
3601 		tid = port->