1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * NOT a DDI compliant Sun Fibre Channel port driver(fp)
26  *
27  */
28 
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/param.h>
32 #include <sys/errno.h>
33 #include <sys/uio.h>
34 #include <sys/buf.h>
35 #include <sys/modctl.h>
36 #include <sys/open.h>
37 #include <sys/file.h>
38 #include <sys/kmem.h>
39 #include <sys/poll.h>
40 #include <sys/conf.h>
41 #include <sys/thread.h>
42 #include <sys/var.h>
43 #include <sys/cmn_err.h>
44 #include <sys/stat.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/promif.h>
48 #include <sys/nvpair.h>
49 #include <sys/byteorder.h>
50 #include <sys/scsi/scsi.h>
51 #include <sys/fibre-channel/fc.h>
52 #include <sys/fibre-channel/impl/fc_ulpif.h>
53 #include <sys/fibre-channel/impl/fc_fcaif.h>
54 #include <sys/fibre-channel/impl/fctl_private.h>
55 #include <sys/fibre-channel/impl/fc_portif.h>
56 #include <sys/fibre-channel/impl/fp.h>
57 
58 /* These are defined in fctl.c! */
59 extern int did_table_size;
60 extern int pwwn_table_size;
61 
/*
 * Character device entry points for the fp control node.  fp exposes
 * only open/close/ioctl (management interface); every data-transfer
 * entry point is nodev, so reads/writes on the node fail with ENXIO.
 */
static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag: MT-safe, hotplug aware */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
82 
/*
 * Device operations vector tying the DDI framework to the fp
 * attach/detach/getinfo/power handlers defined in this file.
 */
static struct dev_ops fp_ops = {
	DEVO_REV,	/* build revision */
	0,		/* reference count */
	fp_getinfo,	/* getinfo */
	nulldev,	/* identify - Obsoleted */
	nulldev,	/* probe */
	fp_attach,	/* attach */
	fp_detach,	/* detach */
	nodev,		/* reset */
	&fp_cb_ops,	/* cb_ops */
	NULL,		/* bus_ops */
	fp_power	/* power */
};
96 
#define	FP_VERSION		"1.96"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

/* Externally visible version string (non-static; referenced elsewhere). */
char *fp_version = FP_NAME_VERSION;
101 
/* Loadable-module descriptor: fp is a device driver (mod_driverops). */
static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};
107 
/* Module linkage: single modldrv entry, NULL-terminated. */
static struct modlinkage modlinkage = {
	MODREV_1,	/* Rev of the loadable modules system */
	&modldrv,	/* NULL-terminated list of linkage structures */
	NULL
};
113 
114 
115 
/*
 * Name server registration request codes (NS_R* object registrations)
 * issued by fp, in the order listed here.
 */
static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};
125 
/*
 * Translation table mapping FC packet completion states (FC_PKT_*)
 * to the driver return values (FC_*) handed back to callers.
 */
struct fp_xlat {
	uchar_t	xlat_state;	/* fc packet state */
	int	xlat_rval;	/* corresponding return value */
} fp_xlat [] = {
	{ FC_PKT_SUCCESS,	FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP,	FC_FAILURE },
	{ FC_PKT_LOCAL_RJT,	FC_FAILURE },
	{ FC_PKT_NPORT_RJT,	FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT,	FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY,	FC_PBUSY },
	{ FC_PKT_FABRIC_BSY,	FC_FBUSY },
	{ FC_PKT_LS_RJT,	FC_FAILURE },
	{ FC_PKT_BA_RJT,	FC_FAILURE },
	{ FC_PKT_TIMEOUT,	FC_FAILURE },
	{ FC_PKT_TRAN_ERROR,	FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE,	FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE,	FC_OFFLINE }
};
146 
/*
 * The set of valid arbitrated-loop physical addresses (AL_PAs),
 * presumably per the FC-AL standard's valid AL_PA list -- used to
 * validate loop addresses.
 */
static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};
163 
/*
 * Per-ioctl permission table: each FCIO command is tagged with the
 * open mode it requires -- FP_OPEN (any open of the node suffices)
 * or FP_EXCL (node must be opened exclusively).
 */
static struct fp_perms {
	uint16_t	fp_ioctl_cmd;	/* FCIO command code */
	uchar_t		fp_open_flag;	/* required open mode */
} fp_perm_list [] = {
	{ FCIO_GET_NUM_DEVS, 		FP_OPEN },
	{ FCIO_GET_DEV_LIST,		FP_OPEN },
	{ FCIO_GET_SYM_PNAME,		FP_OPEN },
	{ FCIO_GET_SYM_NNAME,		FP_OPEN },
	{ FCIO_SET_SYM_PNAME,		FP_EXCL },
	{ FCIO_SET_SYM_NNAME,		FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS,		FP_OPEN },
	{ FCIO_DEV_LOGIN,		FP_EXCL },
	{ FCIO_DEV_LOGOUT,		FP_EXCL },
	{ FCIO_GET_STATE,		FP_OPEN },
	{ FCIO_DEV_REMOVE,		FP_EXCL },
	{ FCIO_GET_FCODE_REV,		FP_OPEN },
	{ FCIO_GET_FW_REV,		FP_OPEN },
	{ FCIO_GET_DUMP_SIZE,		FP_OPEN },
	{ FCIO_FORCE_DUMP,		FP_EXCL },
	{ FCIO_GET_DUMP,		FP_OPEN },
	{ FCIO_GET_TOPOLOGY,		FP_OPEN },
	{ FCIO_RESET_LINK,		FP_EXCL },
	{ FCIO_RESET_HARD,		FP_EXCL },
	{ FCIO_RESET_HARD_CORE,		FP_EXCL },
	{ FCIO_DIAG,			FP_OPEN },
	{ FCIO_NS,			FP_EXCL },
	{ FCIO_DOWNLOAD_FW,		FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE,		FP_EXCL },
	{ FCIO_LINK_STATUS,		FP_OPEN },
	{ FCIO_GET_HOST_PARAMS,		FP_OPEN },
	{ FCIO_GET_NODE_ID,		FP_OPEN },
	{ FCIO_SET_NODE_ID,		FP_EXCL },
	{ FCIO_SEND_NODE_ID,		FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT, FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT, FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
};
210 
/*
 * Power-management component description strings in the format the
 * PM framework expects ("NAME=" followed by "level=description").
 */
static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};
216 
217 
#ifdef	_LITTLE_ENDIAN
/*
 * MAKE_BE_32:
 *	Convert the object pointed to by x to big-endian in place by
 *	byte-swapping each of its 32-bit words.  On big-endian hosts
 *	this is a no-op.
 *
 *	The body is wrapped in do { } while (0) so the macro expands
 *	to a single statement; the previous bare-brace form would
 *	break if used as "if (cond) MAKE_BE_32(x); else ...".
 */
#define	MAKE_BE_32(x)	do { \
	uint32_t	*ptr1, i; \
	ptr1 = (uint32_t *)(x); \
	for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
		*ptr1 = BE_32(*ptr1); \
		ptr1++; \
	} \
} while (0)
#else
#define	MAKE_BE_32(x)
#endif
230 
/* Message verbosity mask; defaults to warnings and fatal messages. */
static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
/* Per-port option bits copied into each port at attach time. */
static uint32_t fp_options = 0;

/* Seconds to wait for the job queue to drain during detach. */
static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances.  Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

/* Tick intervals (in clock ticks) derived from the values above in attach. */
static clock_t	fp_retry_ticks;
static clock_t	fp_offline_ticks;

/* Retry timer granularity in seconds (set to 1 in fp_attach). */
static int fp_retry_ticker;
/* Unsolicited buffer pool defaults handed to the FCA. */
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

/* Trace log queue configuration; fp_logq is allocated in _init(). */
static int		fp_log_size = FP_LOG_SIZE;
static int		fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t	*fp_logq = NULL;

/* Forward declarations for functions defined later in this file. */
int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
	    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);
262 
263 
264 /*
265  * Perform global initialization
266  */
267 int
268 _init(void)
269 {
270 	int ret;
271 
272 	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
273 	    sizeof (struct fc_local_port), 8)) != 0) {
274 		return (ret);
275 	}
276 
277 	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
278 		ddi_soft_state_fini(&fp_driver_softstate);
279 		return (ret);
280 	}
281 
282 	fp_logq = fc_trace_alloc_logq(fp_log_size);
283 
284 	if ((ret = mod_install(&modlinkage)) != 0) {
285 		fc_trace_free_logq(fp_logq);
286 		ddi_soft_state_fini(&fp_driver_softstate);
287 		scsi_hba_fini(&modlinkage);
288 	}
289 
290 	return (ret);
291 }
292 
293 
294 /*
295  * Prepare for driver unload
296  */
297 int
298 _fini(void)
299 {
300 	int ret;
301 
302 	if ((ret = mod_remove(&modlinkage)) == 0) {
303 		fc_trace_free_logq(fp_logq);
304 		ddi_soft_state_fini(&fp_driver_softstate);
305 		scsi_hba_fini(&modlinkage);
306 	}
307 
308 	return (ret);
309 }
310 
311 
312 /*
313  * Request mod_info() to handle all cases
314  */
315 int
316 _info(struct modinfo *modinfo)
317 {
318 	return (mod_info(&modlinkage, modinfo));
319 }
320 
321 
322 /*
323  * fp_attach:
324  *
325  * The respective cmd handlers take care of performing
326  * ULP related invocations
327  */
328 static int
329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
330 {
331 	int rval;
332 
333 	/*
334 	 * We check the value of fp_offline_ticker at this
335 	 * point. The variable is global for the driver and
336 	 * not specific to an instance.
337 	 *
338 	 * If there is no user-defined value found in /etc/system
339 	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
340 	 * The minimum setting for this offline timeout according
341 	 * to the FC-FS2 standard (Fibre Channel Framing and
342 	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
343 	 *
344 	 * We do not recommend setting the value to less than 10
345 	 * seconds (RA_TOV) or more than 90 seconds. If this
346 	 * variable is greater than 90 seconds then drivers above
347 	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
348 	 */
349 
350 	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
351 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
352 	    FP_OFFLINE_TICKER);
353 
354 	if ((fp_offline_ticker < 10) ||
355 	    (fp_offline_ticker > 90)) {
356 		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
357 		    "%d second(s). This is outside the "
358 		    "recommended range of 10..90 seconds",
359 		    fp_offline_ticker);
360 	}
361 
362 	/*
363 	 * Tick every second when there are commands to retry.
364 	 * It should tick at the least granular value of pkt_timeout
365 	 * (which is one second)
366 	 */
367 	fp_retry_ticker = 1;
368 
369 	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
370 	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
371 
372 	switch (cmd) {
373 	case DDI_ATTACH:
374 		rval = fp_attach_handler(dip);
375 		break;
376 
377 	case DDI_RESUME:
378 		rval = fp_resume_handler(dip);
379 		break;
380 
381 	default:
382 		rval = DDI_FAILURE;
383 		break;
384 	}
385 	return (rval);
386 }
387 
388 
389 /*
390  * fp_detach:
391  *
392  * If a ULP fails to handle cmd request converse of
393  * cmd is invoked for ULPs that previously succeeded
394  * cmd request.
395  */
396 static int
397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
398 {
399 	int 			rval = DDI_FAILURE;
400 	fc_local_port_t 	*port;
401 	fc_attach_cmd_t 	converse;
402 	uint8_t			cnt;
403 
404 	if ((port = ddi_get_soft_state(fp_driver_softstate,
405 	    ddi_get_instance(dip))) == NULL) {
406 		return (DDI_FAILURE);
407 	}
408 
409 	mutex_enter(&port->fp_mutex);
410 
411 	if (port->fp_ulp_attach) {
412 		mutex_exit(&port->fp_mutex);
413 		return (DDI_FAILURE);
414 	}
415 
416 	switch (cmd) {
417 	case DDI_DETACH:
418 		if (port->fp_task != FP_TASK_IDLE) {
419 			mutex_exit(&port->fp_mutex);
420 			return (DDI_FAILURE);
421 		}
422 
423 		/* Let's attempt to quit the job handler gracefully */
424 		port->fp_soft_state |= FP_DETACH_INPROGRESS;
425 
426 		mutex_exit(&port->fp_mutex);
427 		converse = FC_CMD_ATTACH;
428 		if (fctl_detach_ulps(port, FC_CMD_DETACH,
429 		    &modlinkage) != FC_SUCCESS) {
430 			mutex_enter(&port->fp_mutex);
431 			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
432 			mutex_exit(&port->fp_mutex);
433 			rval = DDI_FAILURE;
434 			break;
435 		}
436 
437 		mutex_enter(&port->fp_mutex);
438 		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
439 		    cnt++) {
440 			mutex_exit(&port->fp_mutex);
441 			delay(drv_usectohz(1000000));
442 			mutex_enter(&port->fp_mutex);
443 		}
444 
445 		if (port->fp_job_head) {
446 			mutex_exit(&port->fp_mutex);
447 			rval = DDI_FAILURE;
448 			break;
449 		}
450 		mutex_exit(&port->fp_mutex);
451 
452 		rval = fp_detach_handler(port);
453 		break;
454 
455 	case DDI_SUSPEND:
456 		mutex_exit(&port->fp_mutex);
457 		converse = FC_CMD_RESUME;
458 		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
459 		    &modlinkage) != FC_SUCCESS) {
460 			rval = DDI_FAILURE;
461 			break;
462 		}
463 		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
464 			(void) callb_generic_cpr(&port->fp_cpr_info,
465 			    CB_CODE_CPR_RESUME);
466 		}
467 		break;
468 
469 	default:
470 		mutex_exit(&port->fp_mutex);
471 		break;
472 	}
473 
474 	/*
475 	 * Use softint to perform reattach.  Mark fp_ulp_attach so we
476 	 * don't attempt to do this repeatedly on behalf of some persistent
477 	 * caller.
478 	 */
479 	if (rval != DDI_SUCCESS) {
480 		mutex_enter(&port->fp_mutex);
481 		port->fp_ulp_attach = 1;
482 
483 		/*
484 		 * If the port is in the low power mode then there is
485 		 * possibility that fca too could be in low power mode.
486 		 * Try to raise the power before calling attach ulps.
487 		 */
488 
489 		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
490 		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
491 			mutex_exit(&port->fp_mutex);
492 			(void) pm_raise_power(port->fp_port_dip,
493 			    FP_PM_COMPONENT, FP_PM_PORT_UP);
494 		} else {
495 			mutex_exit(&port->fp_mutex);
496 		}
497 
498 
499 		fp_attach_ulps(port, converse);
500 
501 		mutex_enter(&port->fp_mutex);
502 		while (port->fp_ulp_attach) {
503 			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
504 		}
505 
506 		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
507 
508 		/*
509 		 * Mark state as detach failed so asynchronous ULP attach
510 		 * events (downstream, not the ones we're initiating with
511 		 * the call to fp_attach_ulps) are not honored.  We're
512 		 * really still in pending detach.
513 		 */
514 		port->fp_soft_state |= FP_DETACH_FAILED;
515 
516 		mutex_exit(&port->fp_mutex);
517 	}
518 
519 	return (rval);
520 }
521 
522 
523 /*
524  * fp_getinfo:
525  *   Given the device number, return either the
526  *   dev_info_t pointer or the instance number.
527  */
528 
529 /* ARGSUSED */
530 static int
531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
532 {
533 	int 		rval;
534 	minor_t		instance;
535 	fc_local_port_t *port;
536 
537 	rval = DDI_SUCCESS;
538 	instance = getminor((dev_t)arg);
539 
540 	switch (cmd) {
541 	case DDI_INFO_DEVT2DEVINFO:
542 		if ((port = ddi_get_soft_state(fp_driver_softstate,
543 		    instance)) == NULL) {
544 			rval = DDI_FAILURE;
545 			break;
546 		}
547 		*result = (void *)port->fp_port_dip;
548 		break;
549 
550 	case DDI_INFO_DEVT2INSTANCE:
551 		*result = (void *)(uintptr_t)instance;
552 		break;
553 
554 	default:
555 		rval = DDI_FAILURE;
556 		break;
557 	}
558 
559 	return (rval);
560 }
561 
562 
563 /*
564  * Entry point for power up and power down request from kernel
565  */
566 static int
567 fp_power(dev_info_t *dip, int comp, int level)
568 {
569 	int		rval = DDI_FAILURE;
570 	fc_local_port_t	*port;
571 
572 	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
573 	if (port == NULL || comp != FP_PM_COMPONENT) {
574 		return (rval);
575 	}
576 
577 	switch (level) {
578 	case FP_PM_PORT_UP:
579 		rval = DDI_SUCCESS;
580 
581 		/*
582 		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
583 		 * code complete the rediscovery.
584 		 */
585 		mutex_enter(&port->fp_mutex);
586 		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
587 			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
588 			port->fp_pm_level = FP_PM_PORT_UP;
589 			mutex_exit(&port->fp_mutex);
590 			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
591 			break;
592 		}
593 
594 		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
595 			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
596 
597 			port->fp_pm_level = FP_PM_PORT_UP;
598 			rval = fp_power_up(port);
599 			if (rval != DDI_SUCCESS) {
600 				port->fp_pm_level = FP_PM_PORT_DOWN;
601 			}
602 		} else {
603 			port->fp_pm_level = FP_PM_PORT_UP;
604 		}
605 		mutex_exit(&port->fp_mutex);
606 		break;
607 
608 	case FP_PM_PORT_DOWN:
609 		mutex_enter(&port->fp_mutex);
610 
611 		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
612 		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
613 			/*
614 			 * PM framework goofed up. We have don't
615 			 * have any PM components. Let's never go down.
616 			 */
617 			mutex_exit(&port->fp_mutex);
618 			break;
619 
620 		}
621 
622 		if (port->fp_ulp_attach) {
623 			/* We shouldn't let the power go down */
624 			mutex_exit(&port->fp_mutex);
625 			break;
626 		}
627 
628 		/*
629 		 * Not a whole lot to do if we are detaching
630 		 */
631 		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
632 			port->fp_pm_level = FP_PM_PORT_DOWN;
633 			mutex_exit(&port->fp_mutex);
634 			rval = DDI_SUCCESS;
635 			break;
636 		}
637 
638 		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
639 			port->fp_pm_level = FP_PM_PORT_DOWN;
640 
641 			rval = fp_power_down(port);
642 			if (rval != DDI_SUCCESS) {
643 				port->fp_pm_level = FP_PM_PORT_UP;
644 				ASSERT(!(port->fp_soft_state &
645 				    FP_SOFT_POWER_DOWN));
646 			} else {
647 				ASSERT(port->fp_soft_state &
648 				    FP_SOFT_POWER_DOWN);
649 			}
650 		}
651 		mutex_exit(&port->fp_mutex);
652 		break;
653 
654 	default:
655 		break;
656 	}
657 
658 	return (rval);
659 }
660 
661 
662 /*
663  * Open FC port devctl node
664  */
665 static int
666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
667 {
668 	int 		instance;
669 	fc_local_port_t *port;
670 
671 	if (otype != OTYP_CHR) {
672 		return (EINVAL);
673 	}
674 
675 	/*
676 	 * This is not a toy to play with. Allow only powerful
677 	 * users (hopefully knowledgeable) to access the port
678 	 * (A hacker potentially could download a sick binary
679 	 * file into FCA)
680 	 */
681 	if (drv_priv(credp)) {
682 		return (EPERM);
683 	}
684 
685 	instance = (int)getminor(*devp);
686 
687 	port = ddi_get_soft_state(fp_driver_softstate, instance);
688 	if (port == NULL) {
689 		return (ENXIO);
690 	}
691 
692 	mutex_enter(&port->fp_mutex);
693 	if (port->fp_flag & FP_EXCL) {
694 		/*
695 		 * It is already open for exclusive access.
696 		 * So shut the door on this caller.
697 		 */
698 		mutex_exit(&port->fp_mutex);
699 		return (EBUSY);
700 	}
701 
702 	if (flag & FEXCL) {
703 		if (port->fp_flag & FP_OPEN) {
704 			/*
705 			 * Exclusive operation not possible
706 			 * as it is already opened
707 			 */
708 			mutex_exit(&port->fp_mutex);
709 			return (EBUSY);
710 		}
711 		port->fp_flag |= FP_EXCL;
712 	}
713 	port->fp_flag |= FP_OPEN;
714 	mutex_exit(&port->fp_mutex);
715 
716 	return (0);
717 }
718 
719 
720 /*
721  * The driver close entry point is called on the last close()
722  * of a device. So it is perfectly alright to just clobber the
723  * open flag and reset it to idle (instead of having to reset
724  * each flag bits). For any confusion, check out close(9E).
725  */
726 
727 /* ARGSUSED */
728 static int
729 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
730 {
731 	int 		instance;
732 	fc_local_port_t *port;
733 
734 	if (otype != OTYP_CHR) {
735 		return (EINVAL);
736 	}
737 
738 	instance = (int)getminor(dev);
739 
740 	port = ddi_get_soft_state(fp_driver_softstate, instance);
741 	if (port == NULL) {
742 		return (ENXIO);
743 	}
744 
745 	mutex_enter(&port->fp_mutex);
746 	if ((port->fp_flag & FP_OPEN) == 0) {
747 		mutex_exit(&port->fp_mutex);
748 		return (ENODEV);
749 	}
750 	port->fp_flag = FP_IDLE;
751 	mutex_exit(&port->fp_mutex);
752 
753 	return (0);
754 }
755 
756 /*
757  * Handle IOCTL requests
758  */
759 
760 /* ARGSUSED */
761 static int
762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
763 {
764 	int 		instance;
765 	int		ret = 0;
766 	fcio_t		fcio;
767 	fc_local_port_t *port;
768 
769 	instance = (int)getminor(dev);
770 
771 	port = ddi_get_soft_state(fp_driver_softstate, instance);
772 	if (port == NULL) {
773 		return (ENXIO);
774 	}
775 
776 	mutex_enter(&port->fp_mutex);
777 	if ((port->fp_flag & FP_OPEN) == 0) {
778 		mutex_exit(&port->fp_mutex);
779 		return (ENXIO);
780 	}
781 
782 	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
783 		mutex_exit(&port->fp_mutex);
784 		return (ENXIO);
785 	}
786 
787 	mutex_exit(&port->fp_mutex);
788 
789 	/* this will raise power if necessary */
790 	ret = fctl_busy_port(port);
791 	if (ret != 0) {
792 		return (ret);
793 	}
794 
795 	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);
796 
797 
798 	switch (cmd) {
799 	case FCIO_CMD: {
800 #ifdef	_MULTI_DATAMODEL
801 		switch (ddi_model_convert_from(mode & FMODELS)) {
802 		case DDI_MODEL_ILP32: {
803 			struct fcio32 fcio32;
804 
805 			if (ddi_copyin((void *)data, (void *)&fcio32,
806 			    sizeof (struct fcio32), mode)) {
807 				ret = EFAULT;
808 				break;
809 			}
810 			fcio.fcio_xfer = fcio32.fcio_xfer;
811 			fcio.fcio_cmd = fcio32.fcio_cmd;
812 			fcio.fcio_flags = fcio32.fcio_flags;
813 			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
814 			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
815 			fcio.fcio_ibuf =
816 				(caddr_t)(uintptr_t)fcio32.fcio_ibuf;
817 			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
818 			fcio.fcio_obuf =
819 				(caddr_t)(uintptr_t)fcio32.fcio_obuf;
820 			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
821 			fcio.fcio_abuf =
822 				(caddr_t)(uintptr_t)fcio32.fcio_abuf;
823 			fcio.fcio_errno = fcio32.fcio_errno;
824 			break;
825 		}
826 
827 		case DDI_MODEL_NONE:
828 			if (ddi_copyin((void *)data, (void *)&fcio,
829 			    sizeof (fcio_t), mode)) {
830 				ret = EFAULT;
831 			}
832 			break;
833 		}
834 #else	/* _MULTI_DATAMODEL */
835 		if (ddi_copyin((void *)data, (void *)&fcio,
836 		sizeof (fcio_t), mode)) {
837 			ret = EFAULT;
838 			break;
839 		}
840 #endif	/* _MULTI_DATAMODEL */
841 		if (!ret) {
842 			ret = fp_fciocmd(port, data, mode, &fcio);
843 		}
844 		break;
845 	}
846 
847 	default:
848 		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
849 		    mode, credp, rval);
850 	}
851 
852 	fctl_idle_port(port);
853 
854 	return (ret);
855 }
856 
857 
858 /*
859  * Init Symbolic Port Name and Node Name
860  * LV will try to get symbolic names from FCA driver
861  * and register these to name server,
862  * if LV fails to get these,
863  * LV will register its default symbolic names to name server.
864  * The Default symbolic node name format is :
865  * 	<hostname>:<hba driver name>(instance)
866  * The Default symbolic port name format is :
867  * 	<fp path name>
868  */
869 static void
870 fp_init_symbolic_names(fc_local_port_t *port)
871 {
872 	const char *vendorname = ddi_driver_name(port->fp_fca_dip);
873 	char *sym_name;
874 	char fcaname[50] = {0};
875 	int hostnlen, fcanlen;
876 
877 	if (port->fp_sym_node_namelen == 0) {
878 		hostnlen = strlen(utsname.nodename);
879 		(void) snprintf(fcaname, sizeof (fcaname),
880 		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
881 		fcanlen = strlen(fcaname);
882 
883 		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
884 		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
885 		port->fp_sym_node_namelen = strlen(sym_name);
886 		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
887 			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
888 		}
889 		(void) strncpy(port->fp_sym_node_name, sym_name,
890 		    port->fp_sym_node_namelen);
891 		kmem_free(sym_name, hostnlen + fcanlen + 2);
892 	}
893 
894 	if (port->fp_sym_port_namelen == 0) {
895 		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
896 
897 		(void) ddi_pathname(port->fp_port_dip, pathname);
898 		port->fp_sym_port_namelen = strlen(pathname);
899 		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
900 			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
901 		}
902 		(void) strncpy(port->fp_sym_port_name, pathname,
903 		    port->fp_sym_port_namelen);
904 		kmem_free(pathname, MAXPATHLEN);
905 	}
906 }
907 
908 
909 /*
910  * Perform port attach
911  */
912 static int
913 fp_attach_handler(dev_info_t *dip)
914 {
915 	int			rval;
916 	int			instance;
917 	int 			port_num;
918 	int			port_len;
919 	char 			name[30];
920 	char			i_pwwn[17];
921 	fp_cmd_t		*pkt;
922 	uint32_t		ub_count;
923 	fc_local_port_t 	*port;
924 	job_request_t 		*job;
925 	fc_local_port_t *phyport = NULL;
926 	int portpro1;
927 	char pwwn[17], nwwn[17];
928 
929 	instance = ddi_get_instance(dip);
930 
931 	port_len = sizeof (port_num);
932 
933 	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
934 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
935 	    (caddr_t)&port_num, &port_len);
936 
937 	if (rval != DDI_SUCCESS) {
938 		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
939 		    instance);
940 		return (DDI_FAILURE);
941 	}
942 
943 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
944 	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
945 		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
946 		    instance);
947 		return (DDI_FAILURE);
948 	}
949 
950 	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
951 	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
952 		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
953 		    " point minor node", instance);
954 		ddi_remove_minor_node(dip, NULL);
955 		return (DDI_FAILURE);
956 	}
957 
958 	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
959 	    != DDI_SUCCESS) {
960 		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
961 		    instance);
962 		ddi_remove_minor_node(dip, NULL);
963 		return (DDI_FAILURE);
964 	}
965 	port = ddi_get_soft_state(fp_driver_softstate, instance);
966 
967 	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);
968 
969 	port->fp_instance = instance;
970 	port->fp_ulp_attach = 1;
971 	port->fp_port_num = port_num;
972 	port->fp_verbose = fp_verbosity;
973 	port->fp_options = fp_options;
974 
975 	port->fp_fca_dip = ddi_get_parent(dip);
976 	port->fp_port_dip = dip;
977 	port->fp_fca_tran = (fc_fca_tran_t *)
978 	    ddi_get_driver_private(port->fp_fca_dip);
979 
980 	port->fp_task = port->fp_last_task = FP_TASK_IDLE;
981 
982 	/*
983 	 * Init the starting value of fp_rscn_count. Note that if
984 	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
985 	 * actual # of RSCNs will be (fp_rscn_count - 1)
986 	 */
987 	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
988 
989 	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
990 	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
991 	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
992 
993 	(void) sprintf(name, "fp%d_cache", instance);
994 
995 	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
996 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
997 	    "phyport-instance", -1)) != -1) {
998 		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
999 		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
1000 		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
1001 		port->fp_npiv_type = FC_NPIV_PORT;
1002 	}
1003 
1004 	/*
1005 	 * Allocate the pool of fc_packet_t structs to be used with
1006 	 * this fp instance.
1007 	 */
1008 	port->fp_pkt_cache = kmem_cache_create(name,
1009 	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1010 	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1011 	    NULL, 0);
1012 
1013 	if (port->fp_pkt_cache == NULL) {
1014 		goto cache_alloc_failed;
1015 	}
1016 
1017 
1018 	/*
1019 	 * Allocate the d_id and pwwn hash tables for all remote ports
1020 	 * connected to this local port.
1021 	 */
1022 	port->fp_did_table = kmem_zalloc(did_table_size *
1023 	    sizeof (struct d_id_hash), KM_SLEEP);
1024 
1025 	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1026 	    sizeof (struct pwwn_hash), KM_SLEEP);
1027 
1028 	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1029 	    MINCLSYSPRI, 1, 16, 0);
1030 
1031 	/* Indicate that don't have the pm components yet */
1032 	port->fp_soft_state |=  FP_SOFT_NO_PMCOMP;
1033 
1034 	/*
1035 	 * Bind the callbacks with the FCA driver. This will open the gate
1036 	 * for asynchronous callbacks, so after this call the fp_mutex
1037 	 * must be held when updating the fc_local_port_t struct.
1038 	 *
1039 	 * This is done _before_ setting up the job thread so we can avoid
1040 	 * cleaning up after the thread_create() in the error path. This
1041 	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1042 	 */
1043 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1044 		goto bind_callbacks_failed;
1045 	}
1046 
1047 	if (phyport) {
1048 		mutex_enter(&phyport->fp_mutex);
1049 		if (phyport->fp_port_next) {
1050 			phyport->fp_port_next->fp_port_prev = port;
1051 			port->fp_port_next =  phyport->fp_port_next;
1052 			phyport->fp_port_next = port;
1053 			port->fp_port_prev = phyport;
1054 		} else {
1055 			phyport->fp_port_next = port;
1056 			phyport->fp_port_prev = port;
1057 			port->fp_port_next =  phyport;
1058 			port->fp_port_prev = phyport;
1059 		}
1060 		mutex_exit(&phyport->fp_mutex);
1061 	}
1062 
1063 	/*
1064 	 * Init Symbolic Names
1065 	 */
1066 	fp_init_symbolic_names(port);
1067 
1068 	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1069 	    KM_SLEEP, NULL);
1070 
1071 	if (pkt == NULL) {
1072 		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1073 		    instance);
1074 		goto alloc_els_packet_failed;
1075 	}
1076 
1077 	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1078 	    v.v_maxsyspri - 2);
1079 
1080 	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1081 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1082 	    i_pwwn) != DDI_PROP_SUCCESS) {
1083 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1084 		    "fp(%d): Updating 'initiator-port' property"
1085 		    " on fp dev_info node failed", instance);
1086 	}
1087 
1088 	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1089 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1090 	    i_pwwn) != DDI_PROP_SUCCESS) {
1091 		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1092 		    "fp(%d): Updating 'initiator-node' property"
1093 		    " on fp dev_info node failed", instance);
1094 	}
1095 
1096 	mutex_enter(&port->fp_mutex);
1097 	port->fp_els_resp_pkt = pkt;
1098 	mutex_exit(&port->fp_mutex);
1099 
1100 	/*
1101 	 * Determine the count of unsolicited buffers this FCA can support
1102 	 */
1103 	fp_retrieve_caps(port);
1104 
1105 	/*
1106 	 * Allocate unsolicited buffer tokens
1107 	 */
1108 	if (port->fp_ub_count) {
1109 		ub_count = port->fp_ub_count;
1110 		port->fp_ub_tokens = kmem_zalloc(ub_count *
1111 		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
1112 		/*
1113 		 * Do not fail the attach if unsolicited buffer allocation
1114 		 * fails; Just try to get along with whatever the FCA can do.
1115 		 */
1116 		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1117 		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1118 		    FC_SUCCESS || ub_count != port->fp_ub_count) {
1119 			cmn_err(CE_WARN, "fp(%d): failed to allocate "
1120 			    " Unsolicited buffers. proceeding with attach...",
1121 			    instance);
1122 			kmem_free(port->fp_ub_tokens,
1123 			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1124 			port->fp_ub_tokens = NULL;
1125 		}
1126 	}
1127 
1128 	fp_load_ulp_modules(dip, port);
1129 
1130 	/*
1131 	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1132 	 */
1133 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1134 	    "pm-hardware-state", "needs-suspend-resume",
1135 	    strlen("needs-suspend-resume") + 1);
1136 
1137 	/*
1138 	 * fctl maintains a list of all port handles, so
1139 	 * help fctl add this one to its list now.
1140 	 */
1141 	mutex_enter(&port->fp_mutex);
1142 	fctl_add_port(port);
1143 
1144 	/*
	 * If a state change is already in progress, set the bind state to
1146 	 * OFFLINE as well, so further state change callbacks into ULPs
1147 	 * will pass the appropriate states
1148 	 */
1149 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1150 	    port->fp_statec_busy) {
1151 		port->fp_bind_state = FC_STATE_OFFLINE;
1152 		mutex_exit(&port->fp_mutex);
1153 
1154 		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1155 	} else {
1156 		/*
1157 		 * Without dropping the mutex, ensure that the port
1158 		 * startup happens ahead of state change callback
1159 		 * processing
1160 		 */
1161 		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1162 
1163 		port->fp_last_task = port->fp_task;
1164 		port->fp_task = FP_TASK_PORT_STARTUP;
1165 
1166 		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1167 		    fp_startup_done, (opaque_t)port, KM_SLEEP);
1168 
1169 		port->fp_job_head = port->fp_job_tail = job;
1170 
1171 		cv_signal(&port->fp_cv);
1172 
1173 		mutex_exit(&port->fp_mutex);
1174 	}
1175 
1176 	mutex_enter(&port->fp_mutex);
1177 	while (port->fp_ulp_attach) {
1178 		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1179 	}
1180 	mutex_exit(&port->fp_mutex);
1181 
1182 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1183 	    "pm-components", fp_pm_comps,
1184 	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1185 	    DDI_PROP_SUCCESS) {
1186 		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1187 		    " components property, PM disabled on this port.");
1188 		mutex_enter(&port->fp_mutex);
1189 		port->fp_pm_level = FP_PM_PORT_UP;
1190 		mutex_exit(&port->fp_mutex);
1191 	} else {
1192 		if (pm_raise_power(dip, FP_PM_COMPONENT,
1193 		    FP_PM_PORT_UP) != DDI_SUCCESS) {
1194 			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1195 			    " power level");
1196 			mutex_enter(&port->fp_mutex);
1197 			port->fp_pm_level = FP_PM_PORT_UP;
1198 			mutex_exit(&port->fp_mutex);
1199 		}
1200 
1201 		/*
1202 		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1203 		 * the call to pm_raise_power.  The PM framework can't
1204 		 * handle multiple threads calling into it during attach.
1205 		 */
1206 
1207 		mutex_enter(&port->fp_mutex);
1208 		port->fp_soft_state &=  ~FP_SOFT_NO_PMCOMP;
1209 		mutex_exit(&port->fp_mutex);
1210 	}
1211 
1212 	ddi_report_dev(dip);
1213 
1214 	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1215 
1216 	return (DDI_SUCCESS);
1217 
1218 	/*
	 * Unwind any/all preceding allocations in the event of an error.
1220 	 */
1221 
1222 alloc_els_packet_failed:
1223 
1224 	if (port->fp_fca_handle != NULL) {
1225 		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1226 		port->fp_fca_handle = NULL;
1227 	}
1228 
1229 	if (port->fp_ub_tokens != NULL) {
1230 		(void) fc_ulp_ubfree(port, port->fp_ub_count,
1231 		    port->fp_ub_tokens);
1232 		kmem_free(port->fp_ub_tokens,
1233 		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1234 		port->fp_ub_tokens = NULL;
1235 	}
1236 
1237 	if (port->fp_els_resp_pkt != NULL) {
1238 		fp_free_pkt(port->fp_els_resp_pkt);
1239 		port->fp_els_resp_pkt = NULL;
1240 	}
1241 
1242 bind_callbacks_failed:
1243 
1244 	if (port->fp_taskq != NULL) {
1245 		taskq_destroy(port->fp_taskq);
1246 	}
1247 
1248 	if (port->fp_pwwn_table != NULL) {
1249 		kmem_free(port->fp_pwwn_table,
1250 		    pwwn_table_size * sizeof (struct pwwn_hash));
1251 		port->fp_pwwn_table = NULL;
1252 	}
1253 
1254 	if (port->fp_did_table != NULL) {
1255 		kmem_free(port->fp_did_table,
1256 		    did_table_size * sizeof (struct d_id_hash));
1257 		port->fp_did_table = NULL;
1258 	}
1259 
1260 	if (port->fp_pkt_cache != NULL) {
1261 		kmem_cache_destroy(port->fp_pkt_cache);
1262 		port->fp_pkt_cache = NULL;
1263 	}
1264 
1265 cache_alloc_failed:
1266 
1267 	cv_destroy(&port->fp_attach_cv);
1268 	cv_destroy(&port->fp_cv);
1269 	mutex_destroy(&port->fp_mutex);
1270 	ddi_remove_minor_node(port->fp_port_dip, NULL);
1271 	ddi_soft_state_free(fp_driver_softstate, instance);
1272 	ddi_prop_remove_all(dip);
1273 
1274 	return (DDI_FAILURE);
1275 }
1276 
1277 
1278 /*
1279  * Handle DDI_RESUME request
1280  */
static int
fp_resume_handler(dev_info_t *dip)
{
	int		rval;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	/* A DDI_RESUME must only ever follow a successful DDI_SUSPEND */
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		/* pm_raise_power() must be called without fp_mutex held */
		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port thread.
	 * Further, all the discovery is handled in callback mode
	 * (not polled mode); In a specific case such as this, the discovery
	 * is required to happen in polled mode. The easiest way out is
	 * to bail out port thread and get started. Come back and fix this
	 * to do on demand discovery initiated by ULPs. ULPs such as FCP
	 * will do on-demand discovery during pre-power-up busctl handling
	 * which will only be possible when SCSA provides a new HBA vector
	 * for sending down the PM busctl requests.
	 */
	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

	rval = fp_resume_all(port, FC_CMD_RESUME);
	if (rval != DDI_SUCCESS) {
		/*
		 * Resume failed: put the SUSPEND marker back and
		 * re-checkpoint the CPR callback state so a later
		 * resume attempt starts from a consistent state.
		 */
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		(void) callb_generic_cpr(&port->fp_cpr_info,
		    CB_CODE_CPR_CHKPT);
	}

	return (rval);
}
1341 
1342 /*
1343  * Perform FC Port power on initialization
1344  */
1345 static int
1346 fp_power_up(fc_local_port_t *port)
1347 {
1348 	int	rval;
1349 
1350 	ASSERT(MUTEX_HELD(&port->fp_mutex));
1351 
1352 	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1353 	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1354 
1355 	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1356 
1357 	mutex_exit(&port->fp_mutex);
1358 
1359 	rval = fp_resume_all(port, FC_CMD_POWER_UP);
1360 	if (rval != DDI_SUCCESS) {
1361 		mutex_enter(&port->fp_mutex);
1362 		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1363 	} else {
1364 		mutex_enter(&port->fp_mutex);
1365 	}
1366 
1367 	return (rval);
1368 }
1369 
1370 
1371 /*
1372  * It is important to note that the power may possibly be removed between
1373  * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1374  * FC port hardware would have gone through an OFFLINE to ONLINE transition
1375  * (hardware state). In this case, the port driver may need to rediscover the
1376  * topology, perform LOGINs, register with the name server again and perform
1377  * any such port initialization procedures. To perform LOGINs, the driver could
1378  * use the port device handle to see if a LOGIN needs to be performed and use
1379  * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1380  * or removed) which will be reflected in the map the ULPs will see.
1381  */
1382 static int
1383 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
1384 {
1385 
1386 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1387 
1388 	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1389 		return (DDI_FAILURE);
1390 	}
1391 
1392 	mutex_enter(&port->fp_mutex);
1393 
1394 	/*
1395 	 * If there are commands queued for delayed retry, instead of
1396 	 * working the hard way to figure out which ones are good for
1397 	 * restart and which ones not (ELSs are definitely not good
1398 	 * as the port will have to go through a new spin of rediscovery
1399 	 * now), so just flush them out.
1400 	 */
1401 	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
1402 		fp_cmd_t	*cmd;
1403 
1404 		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;
1405 
1406 		mutex_exit(&port->fp_mutex);
1407 		while ((cmd = fp_deque_cmd(port)) != NULL) {
1408 			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
1409 			fp_iodone(cmd);
1410 		}
1411 		mutex_enter(&port->fp_mutex);
1412 	}
1413 
1414 	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
1415 		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
1416 		    port->fp_dev_count) {
1417 			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1418 			port->fp_offline_tid = timeout(fp_offline_timeout,
1419 			    (caddr_t)port, fp_offline_ticks);
1420 		}
1421 		if (port->fp_job_head) {
1422 			cv_signal(&port->fp_cv);
1423 		}
1424 		mutex_exit(&port->fp_mutex);
1425 		fctl_attach_ulps(port, cmd, &modlinkage);
1426 	} else {
1427 		struct job_request *job;
1428 
1429 		/*
1430 		 * If an OFFLINE timer was running at the time of
1431 		 * suspending, there is no need to restart it as
1432 		 * the port is ONLINE now.
1433 		 */
1434 		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1435 		if (port->fp_statec_busy == 0) {
1436 			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
1437 		}
1438 		port->fp_statec_busy++;
1439 		mutex_exit(&port->fp_mutex);
1440 
1441 		job = fctl_alloc_job(JOB_PORT_ONLINE,
1442 		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
1443 		fctl_enque_job(port, job);
1444 
1445 		fctl_jobwait(job);
1446 		fctl_remove_oldies(port);
1447 
1448 		fctl_attach_ulps(port, cmd, &modlinkage);
1449 		fctl_dealloc_job(job);
1450 	}
1451 
1452 	return (DDI_SUCCESS);
1453 }
1454 
1455 
1456 /*
1457  * At this time, there shouldn't be any I/O requests on this port.
1458  * But the unsolicited callbacks from the underlying FCA port need
1459  * to be handled very carefully. The steps followed to handle the
1460  * DDI_DETACH are:
1461  *	+ 	Grab the port driver mutex, check if the unsolicited
1462  *		callback is currently under processing. If true, fail
1463  *		the DDI_DETACH request by printing a message; If false
1464  *		mark the DDI_DETACH as under progress, so that any
1465  *		further unsolicited callbacks get bounced.
1466  *	+	Perform PRLO/LOGO if necessary, cleanup all the data
1467  *		structures.
1468  *	+	Get the job_handler thread to gracefully exit.
1469  *	+	Unregister callbacks with the FCA port.
1470  *	+	Now that some peace is found, notify all the ULPs of
1471  *		DDI_DETACH request (using ulp_port_detach entry point)
1472  *	+	Free all mutexes, semaphores, conditional variables.
1473  *	+	Free the soft state, return success.
1474  *
1475  * Important considerations:
1476  *		Port driver de-registers state change and unsolicited
1477  *		callbacks before taking up the task of notifying ULPs
1478  *		and performing PRLO and LOGOs.
1479  *
1480  *		A port may go offline at the time PRLO/LOGO is being
1481  *		requested. It is expected of all FCA drivers to fail
1482  *		such requests either immediately with a FC_OFFLINE
1483  *		return code to fc_fca_transport() or return the packet
1484  *		asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1485  */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t 	*job;
	uint32_t	delay_count;
	fc_orphan_t	*orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		/* Drop the mutex while sleeping one second per iteration */
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	/* Callbacks still active after the ~30 second wait: fail the detach */
	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job.  Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us.  In theory the fp driver could even
	 * be modunloaded while the job thread is still executing; nothing
	 * currently prevents that race.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	/* Take this port off fctl's global list of port handles */
	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	/* Unregister our callbacks with the underlying FCA driver */
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
			sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}

	/* Free the list of orphaned (unresolved) remote ports */
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	/* Finally tear down the synchronization primitives and soft state */
	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}
1618 
1619 
1620 /*
1621  * Steps to perform DDI_SUSPEND operation on a FC port
1622  *
1623  *	- If already suspended return DDI_FAILURE
1624  *	- If already power-suspended return DDI_SUCCESS
1625  *	- If an unsolicited callback or state change handling is in
1626  *	    in progress, throw a warning message, return DDI_FAILURE
1627  *	- Cancel timeouts
1628  *	- SUSPEND the job_handler thread (means do nothing as it is
1629  *          taken care of by the CPR frame work)
1630  */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t	delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console)
	 *
	 * Busy waiting for a short interval (500 millisecond ?) to see
	 * if the callback processing completes may be another idea. Since
	 * most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	/* Wait up to ~30 seconds for in-flight FCA callbacks to drain */
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	/* Cancel timeouts and quiesce the port (fp_mutex stays held) */
	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}
1705 
1706 
1707 /*
1708  * Prepare for graceful power down of a FC port
1709  */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Power down request followed by a DDI_SUSPEND should
	 * never happen; If it does return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		/* Re-acquire fp_mutex: caller expects it held on return */
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	/* Cancel timeouts and quiesce the port (fp_mutex stays held) */
	fp_suspend_all(port);

	return (DDI_SUCCESS);
}
1781 
1782 
1783 /*
1784  * Suspend the entire FC port
1785  */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int			index;
	struct pwwn_hash 	*head;
	fc_remote_port_t 	*pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Cancel the wait timeout if armed.  The mutex is dropped
	 * around untimeout(9F) so a concurrently-firing timeout
	 * handler that needs fp_mutex can't deadlock against us;
	 * FP_RESTORE_WAIT_TIMEOUT records that it must be re-armed
	 * on resume.
	 */
	if (port->fp_wait_tid != 0) {
		timeout_id_t 	tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	/* Same treatment for the OFFLINE timeout */
	if (port->fp_offline_tid) {
		timeout_id_t 	tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	/* Unbind FCA callbacks; must be done without fp_mutex held */
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}
1839 
1840 
1841 /*
1842  * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
1844  * Returns 0 for success or -1 for failure.
1845  *
1846  * This function allocates DMA handles for both command and responses.
1847  * Most of the ELSs used have both command and responses so it is strongly
1848  * desired to move them to cache constructor routine.
1849  *
1850  * Context: Can sleep iff called with KM_SLEEP flag.
1851  */
1852 static int
1853 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1854 {
1855 	int 		(*cb) (caddr_t);
1856 	fc_packet_t 	*pkt;
1857 	fp_cmd_t 	*cmd = (fp_cmd_t *)buf;
1858 	fc_local_port_t *port = (fc_local_port_t *)cdarg;
1859 
1860 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1861 
1862 	cmd->cmd_next = NULL;
1863 	cmd->cmd_flags = 0;
1864 	cmd->cmd_dflags = 0;
1865 	cmd->cmd_job = NULL;
1866 	cmd->cmd_port = port;
1867 	pkt = &cmd->cmd_pkt;
1868 
1869 	if (ddi_dma_alloc_handle(port->fp_fca_dip,
1870 	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1871 	    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1872 		return (-1);
1873 	}
1874 
1875 	if (ddi_dma_alloc_handle(port->fp_fca_dip,
1876 	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
1877 	    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1878 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1879 		return (-1);
1880 	}
1881 
1882 	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1883 	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1884 	    pkt->pkt_data_cookie_cnt = 0;
1885 	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1886 	    pkt->pkt_data_cookie = NULL;
1887 	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1888 
1889 	return (0);
1890 }
1891 
1892 
1893 /*
1894  * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initialization of fc_packet_t structs.
1896  */
1897 /* ARGSUSED */
1898 static void
1899 fp_cache_destructor(void *buf, void *cdarg)
1900 {
1901 	fp_cmd_t 	*cmd = (fp_cmd_t *)buf;
1902 	fc_packet_t	*pkt;
1903 
1904 	pkt = &cmd->cmd_pkt;
1905 	if (pkt->pkt_cmd_dma) {
1906 		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1907 	}
1908 
1909 	if (pkt->pkt_resp_dma) {
1910 		ddi_dma_free_handle(&pkt->pkt_resp_dma);
1911 	}
1912 }
1913 
1914 
1915 /*
1916  * Packet allocation for ELS and any other port driver commands
1917  *
1918  * Some ELSs like FLOGI and PLOGI are critical for topology and
1919  * device discovery and a system's inability to allocate memory
1920  * or DVMA resources while performing some of these critical ELSs
1921  * cause a lot of problem. While memory allocation failures are
1922  * rare, DVMA resource failures are common as the applications
1923  * are becoming more and more powerful on huge servers.  So it
1924  * is desirable to have a framework support to reserve a fragment
1925  * of DVMA. So until this is fixed the correct way, the suffering
1926  * is huge whenever a LIP happens at a time DVMA resources are
1927  * drained out completely - So an attempt needs to be made to
1928  * KM_SLEEP while requesting for these resources, hoping that
1929  * the requests won't hang forever.
1930  *
1931  * The fc_remote_port_t argument is stored into the pkt_pd field in the
1932  * fc_packet_t struct prior to the fc_ulp_init_packet() call.  This
1933  * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1934  * If there is no fc_remote_port_t associated with the fc_packet_t, then
1935  * fp_alloc_pkt() must be called with pd set to NULL.
1936  */
1937 
1938 static fp_cmd_t *
1939 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1940     fc_remote_port_t *pd)
1941 {
1942 	int		rval;
1943 	ulong_t		real_len;
1944 	fp_cmd_t 	*cmd;
1945 	fc_packet_t	*pkt;
1946 	int 		(*cb) (caddr_t);
1947 	ddi_dma_cookie_t	pkt_cookie;
1948 	ddi_dma_cookie_t	*cp;
1949 	uint32_t		cnt;
1950 
1951 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
1952 
1953 	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1954 
1955 	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1956 	if (cmd == NULL) {
1957 		return (cmd);
1958 	}
1959 
1960 	cmd->cmd_ulp_pkt = NULL;
1961 	cmd->cmd_flags = 0;
1962 	pkt = &cmd->cmd_pkt;
1963 	ASSERT(cmd->cmd_dflags == 0);
1964 
1965 	pkt->pkt_datalen = 0;
1966 	pkt->pkt_data = NULL;
1967 	pkt->pkt_state = 0;
1968 	pkt->pkt_action = 0;
1969 	pkt->pkt_reason = 0;
1970 	pkt->pkt_expln = 0;
1971 
1972 	/*
1973 	 * Init pkt_pd with the given pointer; this must be done _before_
1974 	 * the call to fc_ulp_init_packet().
1975 	 */
1976 	pkt->pkt_pd = pd;
1977 
1978 	/* Now call the FCA driver to init its private, per-packet fields */
1979 	if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
1980 		goto alloc_pkt_failed;
1981 	}
1982 
1983 	if (cmd_len) {
1984 		ASSERT(pkt->pkt_cmd_dma != NULL);
1985 
1986 		rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
1987 		    port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
1988 		    cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
1989 		    &pkt->pkt_cmd_acc);
1990 
1991 		if (rval != DDI_SUCCESS) {
1992 			goto alloc_pkt_failed;
1993 		}
1994 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
1995 
1996 		if (real_len < cmd_len) {
1997 			goto alloc_pkt_failed;
1998 		}
1999 
2000 		rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2001 		    pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2002 		    DDI_DMA_CONSISTENT, cb, NULL,
2003 		    &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2004 
2005 		if (rval != DDI_DMA_MAPPED) {
2006 			goto alloc_pkt_failed;
2007 		}
2008 
2009 		cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2010 
2011 		if (pkt->pkt_cmd_cookie_cnt >
2012 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2013 			goto alloc_pkt_failed;
2014 		}
2015 
2016 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2017 
2018 		cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2019 		    pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2020 		    KM_NOSLEEP);
2021 
2022 		if (cp == NULL) {
2023 			goto alloc_pkt_failed;
2024 		}
2025 
2026 		*cp = pkt_cookie;
2027 		cp++;
2028 		for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2029 			ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2030 			*cp = pkt_cookie;
2031 		}
2032 	}
2033 
2034 	if (resp_len) {
2035 		ASSERT(pkt->pkt_resp_dma != NULL);
2036 
2037 		rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2038 		    port->fp_fca_tran->fca_acc_attr,
2039 		    DDI_DMA_CONSISTENT, cb, NULL,
2040 		    (caddr_t *)&pkt->pkt_resp, &real_len,
2041 		    &pkt->pkt_resp_acc);
2042 
2043 		if (rval != DDI_SUCCESS) {
2044 			goto alloc_pkt_failed;
2045 		}
2046 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2047 
2048 		if (real_len < resp_len) {
2049 			goto alloc_pkt_failed;
2050 		}
2051 
2052 		rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2053 		    pkt->pkt_resp, real_len, DDI_DMA_READ |
2054 		    DDI_DMA_CONSISTENT, cb, NULL,
2055 		    &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2056 
2057 		if (rval != DDI_DMA_MAPPED) {
2058 			goto alloc_pkt_failed;
2059 		}
2060 
2061 		cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2062 
2063 		if (pkt->pkt_resp_cookie_cnt >
2064 		    port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2065 			goto alloc_pkt_failed;
2066 		}
2067 
2068 		ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2069 
2070 		cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2071 		    pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2072 		    KM_NOSLEEP);
2073 
2074 		if (cp == NULL) {
2075 			goto alloc_pkt_failed;
2076 		}
2077 
2078 		*cp = pkt_cookie;
2079 		cp++;
2080 		for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2081 			ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2082 			*cp = pkt_cookie;
2083 		}
2084 	}
2085 
2086 	pkt->pkt_cmdlen = cmd_len;
2087 	pkt->pkt_rsplen = resp_len;
2088 	pkt->pkt_ulp_private = cmd;
2089 
2090 	return (cmd);
2091 
2092 alloc_pkt_failed:
2093 
2094 	fp_free_dma(cmd);
2095 
2096 	if (pkt->pkt_cmd_cookie != NULL) {
2097 		kmem_free(pkt->pkt_cmd_cookie,
2098 		    pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2099 		pkt->pkt_cmd_cookie = NULL;
2100 	}
2101 
2102 	if (pkt->pkt_resp_cookie != NULL) {
2103 		kmem_free(pkt->pkt_resp_cookie,
2104 		    pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2105 		pkt->pkt_resp_cookie = NULL;
2106 	}
2107 
2108 	kmem_cache_free(port->fp_pkt_cache, cmd);
2109 
2110 	return (NULL);
2111 }
2112 
2113 
2114 /*
2115  * Free FC packet
2116  */
2117 static void
2118 fp_free_pkt(fp_cmd_t *cmd)
2119 {
2120 	fc_local_port_t *port;
2121 	fc_packet_t 	*pkt;
2122 
2123 	ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2124 
2125 	cmd->cmd_next = NULL;
2126 	cmd->cmd_job = NULL;
2127 	pkt = &cmd->cmd_pkt;
2128 	pkt->pkt_ulp_private = 0;
2129 	pkt->pkt_tran_flags = 0;
2130 	pkt->pkt_tran_type = 0;
2131 	port = cmd->cmd_port;
2132 
2133 	if (pkt->pkt_cmd_cookie != NULL) {
2134 		kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2135 		    sizeof (ddi_dma_cookie_t));
2136 		pkt->pkt_cmd_cookie = NULL;
2137 	}
2138 
2139 	if (pkt->pkt_resp_cookie != NULL) {
2140 		kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2141 		    sizeof (ddi_dma_cookie_t));
2142 		pkt->pkt_resp_cookie = NULL;
2143 	}
2144 
2145 	fp_free_dma(cmd);
2146 	(void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2147 	kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2148 }
2149 
2150 
2151 /*
2152  * Release DVMA resources
2153  */
2154 static void
2155 fp_free_dma(fp_cmd_t *cmd)
2156 {
2157 	fc_packet_t *pkt = &cmd->cmd_pkt;
2158 
2159 	pkt->pkt_cmdlen = 0;
2160 	pkt->pkt_rsplen = 0;
2161 	pkt->pkt_tran_type = 0;
2162 	pkt->pkt_tran_flags = 0;
2163 
2164 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2165 		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2166 	}
2167 
2168 	if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2169 		if (pkt->pkt_cmd_acc) {
2170 			ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2171 		}
2172 	}
2173 
2174 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2175 		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2176 	}
2177 
2178 	if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2179 		if (pkt->pkt_resp_acc) {
2180 			ddi_dma_mem_free(&pkt->pkt_resp_acc);
2181 		}
2182 	}
2183 	cmd->cmd_dflags = 0;
2184 }
2185 
2186 
2187 /*
2188  * Dedicated thread to perform various activities.  One thread for
2189  * each fc_local_port_t (driver soft state) instance.
2190  * Note, this effectively works out to one thread for each local
2191  * port, but there are also some Solaris taskq threads in use on a per-local
2192  * port basis; these also need to be taken into consideration.
2193  */
2194 static void
2195 fp_job_handler(fc_local_port_t *port)
2196 {
2197 	int			rval;
2198 	uint32_t		*d_id;
2199 	fc_remote_port_t 	*pd;
2200 	job_request_t 		*job;
2201 
2202 #ifndef	__lock_lint
2203 	/*
2204 	 * Solaris-internal stuff for proper operation of kernel threads
2205 	 * with Solaris CPR.
2206 	 */
2207 	CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2208 	    callb_generic_cpr, "fp_job_handler");
2209 #endif
2210 
2211 
2212 	/* Loop forever waiting for work to do */
2213 	for (;;) {
2214 
2215 		mutex_enter(&port->fp_mutex);
2216 
2217 		/*
2218 		 * Sleep if no work to do right now, or if we want
2219 		 * to suspend or power-down.
2220 		 */
2221 		while (port->fp_job_head == NULL ||
2222 		    (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2223 		    FP_SOFT_SUSPEND))) {
2224 			CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2225 			cv_wait(&port->fp_cv, &port->fp_mutex);
2226 			CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2227 		}
2228 
2229 		/*
2230 		 * OK, we've just been woken up, so retrieve the next entry
2231 		 * from the head of the job queue for this local port.
2232 		 */
2233 		job = fctl_deque_job(port);
2234 
2235 		/*
2236 		 * Handle all the fp driver's supported job codes here
2237 		 * in this big honkin' switch.
2238 		 */
2239 		switch (job->job_code) {
2240 		case JOB_PORT_SHUTDOWN:
2241 			/*
2242 			 * fp_port_shutdown() is only called from here. This
2243 			 * will prepare the local port instance (softstate)
2244 			 * for detaching.  This cancels timeout callbacks,
2245 			 * executes LOGOs with remote ports, cleans up tables,
2246 			 * and deallocates data structs.
2247 			 */
2248 			fp_port_shutdown(port, job);
2249 
2250 			/*
2251 			 * This will exit the job thread.
2252 			 */
2253 #ifndef __lock_lint
2254 			CALLB_CPR_EXIT(&(port->fp_cpr_info));
2255 #else
2256 			mutex_exit(&port->fp_mutex);
2257 #endif
2258 			fctl_jobdone(job);
2259 			thread_exit();
2260 
2261 			/* NOTREACHED */
2262 
2263 		case JOB_ATTACH_ULP: {
2264 			/*
2265 			 * This job is spawned in response to a ULP calling
2266 			 * fc_ulp_add().
2267 			 */
2268 
2269 			boolean_t do_attach_ulps = B_TRUE;
2270 
2271 			/*
2272 			 * If fp is detaching, we don't want to call
2273 			 * fp_startup_done as this asynchronous
2274 			 * notification may interfere with the re-attach.
2275 			 */
2276 
2277 			if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2278 			    FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2279 				do_attach_ulps = B_FALSE;
2280 			} else {
2281 				/*
2282 				 * We are going to force the transport
2283 				 * to attach to the ULPs, so set
2284 				 * fp_ulp_attach.  This will keep any
2285 				 * potential detach from occurring until
2286 				 * we are done.
2287 				 */
2288 				port->fp_ulp_attach = 1;
2289 			}
2290 
2291 			mutex_exit(&port->fp_mutex);
2292 
2293 			/*
2294 			 * NOTE: Since we just dropped the mutex, there is now
2295 			 * a race window where the fp_soft_state check above
2296 			 * could change here.  This race is covered because an
2297 			 * additional check was added in the functions hidden
2298 			 * under fp_startup_done().
2299 			 */
2300 			if (do_attach_ulps == B_TRUE) {
2301 				/*
2302 				 * This goes thru a bit of a convoluted call
2303 				 * chain before spawning off a DDI taskq
2304 				 * request to perform the actual attach
2305 				 * operations. Blocking can occur at a number
2306 				 * of points.
2307 				 */
2308 				fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2309 			}
2310 			job->job_result = FC_SUCCESS;
2311 			fctl_jobdone(job);
2312 			break;
2313 		}
2314 
2315 		case JOB_ULP_NOTIFY: {
2316 			/*
2317 			 * Pass state change notifications up to any/all
2318 			 * registered ULPs.
2319 			 */
2320 			uint32_t statec;
2321 
2322 			statec = job->job_ulp_listlen;
2323 			if (statec == FC_STATE_RESET_REQUESTED) {
2324 				port->fp_last_task = port->fp_task;
2325 				port->fp_task = FP_TASK_OFFLINE;
2326 				fp_port_offline(port, 0);
2327 				port->fp_task = port->fp_last_task;
2328 				port->fp_last_task = FP_TASK_IDLE;
2329 			}
2330 
2331 			if (--port->fp_statec_busy == 0) {
2332 				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2333 			}
2334 
2335 			mutex_exit(&port->fp_mutex);
2336 
2337 			job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2338 			fctl_jobdone(job);
2339 			break;
2340 		}
2341 
2342 		case JOB_PLOGI_ONE:
2343 			/*
2344 			 * Issue a PLOGI to a single remote port. Multiple
2345 			 * PLOGIs to different remote ports may occur in
2346 			 * parallel.
2347 			 * This can create the fc_remote_port_t if it does not
2348 			 * already exist.
2349 			 */
2350 
2351 			mutex_exit(&port->fp_mutex);
2352 			d_id = (uint32_t *)job->job_private;
2353 			pd = fctl_get_remote_port_by_did(port, *d_id);
2354 
2355 			if (pd) {
2356 				mutex_enter(&pd->pd_mutex);
2357 				if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2358 					pd->pd_login_count++;
2359 					mutex_exit(&pd->pd_mutex);
2360 					job->job_result = FC_SUCCESS;
2361 					fctl_jobdone(job);
2362 					break;
2363 				}
2364 				mutex_exit(&pd->pd_mutex);
2365 			} else {
2366 				mutex_enter(&port->fp_mutex);
2367 				if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2368 					mutex_exit(&port->fp_mutex);
2369 					pd = fp_create_remote_port_by_ns(port,
2370 					    *d_id, KM_SLEEP);
2371 					if (pd == NULL) {
2372 						job->job_result = FC_FAILURE;
2373 						fctl_jobdone(job);
2374 						break;
2375 					}
2376 				} else {
2377 					mutex_exit(&port->fp_mutex);
2378 				}
2379 			}
2380 
2381 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2382 			job->job_counter = 1;
2383 
2384 			rval = fp_port_login(port, *d_id, job,
2385 			    FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2386 
2387 			if (rval != FC_SUCCESS) {
2388 				job->job_result = rval;
2389 				fctl_jobdone(job);
2390 			}
2391 			break;
2392 
2393 		case JOB_LOGO_ONE: {
2394 			/*
2395 			 * Issue a PLOGO to a single remote port. Multiple
2396 			 * PLOGOs to different remote ports may occur in
2397 			 * parallel.
2398 			 */
2399 			fc_remote_port_t *pd;
2400 
2401 #ifndef	__lock_lint
2402 			ASSERT(job->job_counter > 0);
2403 #endif
2404 
2405 			pd = (fc_remote_port_t *)job->job_ulp_pkts;
2406 
2407 			mutex_enter(&pd->pd_mutex);
2408 			if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2409 				mutex_exit(&pd->pd_mutex);
2410 				job->job_result = FC_LOGINREQ;
2411 				mutex_exit(&port->fp_mutex);
2412 				fctl_jobdone(job);
2413 				break;
2414 			}
2415 			if (pd->pd_login_count > 1) {
2416 				pd->pd_login_count--;
2417 				mutex_exit(&pd->pd_mutex);
2418 				job->job_result = FC_SUCCESS;
2419 				mutex_exit(&port->fp_mutex);
2420 				fctl_jobdone(job);
2421 				break;
2422 			}
2423 			mutex_exit(&pd->pd_mutex);
2424 			mutex_exit(&port->fp_mutex);
2425 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2426 			(void) fp_logout(port, pd, job);
2427 			break;
2428 		}
2429 
2430 		case JOB_FCIO_LOGIN:
2431 			/*
2432 			 * PLOGI initiated at ioctl request.
2433 			 */
2434 			mutex_exit(&port->fp_mutex);
2435 			job->job_result =
2436 			    fp_fcio_login(port, job->job_private, job);
2437 			fctl_jobdone(job);
2438 			break;
2439 
2440 		case JOB_FCIO_LOGOUT:
2441 			/*
2442 			 * PLOGO initiated at ioctl request.
2443 			 */
2444 			mutex_exit(&port->fp_mutex);
2445 			job->job_result =
2446 			    fp_fcio_logout(port, job->job_private, job);
2447 			fctl_jobdone(job);
2448 			break;
2449 
2450 		case JOB_PORT_GETMAP:
2451 		case JOB_PORT_GETMAP_PLOGI_ALL: {
2452 			port->fp_last_task = port->fp_task;
2453 			port->fp_task = FP_TASK_GETMAP;
2454 
2455 			switch (port->fp_topology) {
2456 			case FC_TOP_PRIVATE_LOOP:
2457 				job->job_counter = 1;
2458 
2459 				fp_get_loopmap(port, job);
2460 				mutex_exit(&port->fp_mutex);
2461 				fp_jobwait(job);
2462 				fctl_fillout_map(port,
2463 				    (fc_portmap_t **)job->job_private,
2464 				    (uint32_t *)job->job_arg, 1, 0, 0);
2465 				fctl_jobdone(job);
2466 				mutex_enter(&port->fp_mutex);
2467 				break;
2468 
2469 			case FC_TOP_PUBLIC_LOOP:
2470 			case FC_TOP_FABRIC:
2471 				mutex_exit(&port->fp_mutex);
2472 				job->job_counter = 1;
2473 
2474 				job->job_result = fp_ns_getmap(port,
2475 				    job, (fc_portmap_t **)job->job_private,
2476 				    (uint32_t *)job->job_arg,
2477 				    FCTL_GAN_START_ID);
2478 				fctl_jobdone(job);
2479 				mutex_enter(&port->fp_mutex);
2480 				break;
2481 
2482 			case FC_TOP_PT_PT:
2483 				mutex_exit(&port->fp_mutex);
2484 				fctl_fillout_map(port,
2485 				    (fc_portmap_t **)job->job_private,
2486 				    (uint32_t *)job->job_arg, 1, 0, 0);
2487 				fctl_jobdone(job);
2488 				mutex_enter(&port->fp_mutex);
2489 				break;
2490 
2491 			default:
2492 				mutex_exit(&port->fp_mutex);
2493 				fctl_jobdone(job);
2494 				mutex_enter(&port->fp_mutex);
2495 				break;
2496 			}
2497 			port->fp_task = port->fp_last_task;
2498 			port->fp_last_task = FP_TASK_IDLE;
2499 			mutex_exit(&port->fp_mutex);
2500 			break;
2501 		}
2502 
2503 		case JOB_PORT_OFFLINE: {
2504 			fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2505 
2506 			port->fp_last_task = port->fp_task;
2507 			port->fp_task = FP_TASK_OFFLINE;
2508 
2509 			if (port->fp_statec_busy > 2) {
2510 				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2511 				fp_port_offline(port, 0);
2512 				if (--port->fp_statec_busy == 0) {
2513 					port->fp_soft_state &=
2514 					    ~FP_SOFT_IN_STATEC_CB;
2515 				}
2516 			} else {
2517 				fp_port_offline(port, 1);
2518 			}
2519 
2520 			port->fp_task = port->fp_last_task;
2521 			port->fp_last_task = FP_TASK_IDLE;
2522 
2523 			mutex_exit(&port->fp_mutex);
2524 
2525 			fctl_jobdone(job);
2526 			break;
2527 		}
2528 
2529 		case JOB_PORT_STARTUP: {
2530 			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2531 				if (port->fp_statec_busy > 1) {
2532 					mutex_exit(&port->fp_mutex);
2533 					break;
2534 				}
2535 				mutex_exit(&port->fp_mutex);
2536 
2537 				FP_TRACE(FP_NHEAD2(9, rval),
2538 				    "Topology discovery failed");
2539 				break;
2540 			}
2541 
2542 			/*
2543 			 * Attempt building device handles in case
2544 			 * of private Loop.
2545 			 */
2546 			if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2547 				job->job_counter = 1;
2548 
2549 				fp_get_loopmap(port, job);
2550 				mutex_exit(&port->fp_mutex);
2551 				fp_jobwait(job);
2552 				mutex_enter(&port->fp_mutex);
2553 				if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2554 					ASSERT(port->fp_total_devices == 0);
2555 					port->fp_total_devices =
2556 					    port->fp_dev_count;
2557 				}
2558 			} else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2559 				/*
2560 				 * Hack to avoid state changes going up early
2561 				 */
2562 				port->fp_statec_busy++;
2563 				port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2564 
2565 				job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2566 				fp_fabric_online(port, job);
2567 				job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2568 			}
2569 			mutex_exit(&port->fp_mutex);
2570 			fctl_jobdone(job);
2571 			break;
2572 		}
2573 
2574 		case JOB_PORT_ONLINE: {
2575 			char 		*newtop;
2576 			char 		*oldtop;
2577 			uint32_t	old_top;
2578 
2579 			fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2580 
2581 			/*
2582 			 * Bail out early if there are a lot of
2583 			 * state changes in the pipeline
2584 			 */
2585 			if (port->fp_statec_busy > 1) {
2586 				--port->fp_statec_busy;
2587 				mutex_exit(&port->fp_mutex);
2588 				fctl_jobdone(job);
2589 				break;
2590 			}
2591 
2592 			switch (old_top = port->fp_topology) {
2593 			case FC_TOP_PRIVATE_LOOP:
2594 				oldtop = "Private Loop";
2595 				break;
2596 
2597 			case FC_TOP_PUBLIC_LOOP:
2598 				oldtop = "Public Loop";
2599 				break;
2600 
2601 			case FC_TOP_PT_PT:
2602 				oldtop = "Point to Point";
2603 				break;
2604 
2605 			case FC_TOP_FABRIC:
2606 				oldtop = "Fabric";
2607 				break;
2608 
2609 			default:
2610 				oldtop = NULL;
2611 				break;
2612 			}
2613 
2614 			port->fp_last_task = port->fp_task;
2615 			port->fp_task = FP_TASK_ONLINE;
2616 
2617 			if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2618 
2619 				port->fp_task = port->fp_last_task;
2620 				port->fp_last_task = FP_TASK_IDLE;
2621 
2622 				if (port->fp_statec_busy > 1) {
2623 					--port->fp_statec_busy;
2624 					mutex_exit(&port->fp_mutex);
2625 					break;
2626 				}
2627 
2628 				port->fp_state = FC_STATE_OFFLINE;
2629 
2630 				FP_TRACE(FP_NHEAD2(9, rval),
2631 				    "Topology discovery failed");
2632 
2633 				if (--port->fp_statec_busy == 0) {
2634 					port->fp_soft_state &=
2635 					    ~FP_SOFT_IN_STATEC_CB;
2636 				}
2637 
2638 				if (port->fp_offline_tid == NULL) {
2639 					port->fp_offline_tid =
2640 					    timeout(fp_offline_timeout,
2641 					    (caddr_t)port, fp_offline_ticks);
2642 				}
2643 
2644 				mutex_exit(&port->fp_mutex);
2645 				break;
2646 			}
2647 
2648 			switch (port->fp_topology) {
2649 			case FC_TOP_PRIVATE_LOOP:
2650 				newtop = "Private Loop";
2651 				break;
2652 
2653 			case FC_TOP_PUBLIC_LOOP:
2654 				newtop = "Public Loop";
2655 				break;
2656 
2657 			case FC_TOP_PT_PT:
2658 				newtop = "Point to Point";
2659 				break;
2660 
2661 			case FC_TOP_FABRIC:
2662 				newtop = "Fabric";
2663 				break;
2664 
2665 			default:
2666 				newtop = NULL;
2667 				break;
2668 			}
2669 
2670 			if (oldtop && newtop && strcmp(oldtop, newtop)) {
2671 				fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2672 				    "Change in FC Topology old = %s new = %s",
2673 				    oldtop, newtop);
2674 			}
2675 
2676 			switch (port->fp_topology) {
2677 			case FC_TOP_PRIVATE_LOOP: {
2678 				int orphan = (old_top == FC_TOP_FABRIC ||
2679 				    old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2680 
2681 				mutex_exit(&port->fp_mutex);
2682 				fp_loop_online(port, job, orphan);
2683 				break;
2684 			}
2685 
2686 			case FC_TOP_PUBLIC_LOOP:
2687 				/* FALLTHROUGH */
2688 			case FC_TOP_FABRIC:
2689 				fp_fabric_online(port, job);
2690 				mutex_exit(&port->fp_mutex);
2691 				break;
2692 
2693 			case FC_TOP_PT_PT:
2694 				fp_p2p_online(port, job);
2695 				mutex_exit(&port->fp_mutex);
2696 				break;
2697 
2698 			default:
2699 				if (--port->fp_statec_busy != 0) {
2700 					/*
2701 					 * Watch curiously at what the next
2702 					 * state transition can do.
2703 					 */
2704 					mutex_exit(&port->fp_mutex);
2705 					break;
2706 				}
2707 
2708 				FP_TRACE(FP_NHEAD2(9, 0),
2709 				    "Topology Unknown, Offlining the port..");
2710 
2711 				port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2712 				port->fp_state = FC_STATE_OFFLINE;
2713 
2714 				if (port->fp_offline_tid == NULL) {
2715 					port->fp_offline_tid =
2716 					    timeout(fp_offline_timeout,
2717 					    (caddr_t)port, fp_offline_ticks);
2718 				}
2719 				mutex_exit(&port->fp_mutex);
2720 				break;
2721 			}
2722 
2723 			mutex_enter(&port->fp_mutex);
2724 
2725 			port->fp_task = port->fp_last_task;
2726 			port->fp_last_task = FP_TASK_IDLE;
2727 
2728 			mutex_exit(&port->fp_mutex);
2729 
2730 			fctl_jobdone(job);
2731 			break;
2732 		}
2733 
2734 		case JOB_PLOGI_GROUP: {
2735 			mutex_exit(&port->fp_mutex);
2736 			fp_plogi_group(port, job);
2737 			break;
2738 		}
2739 
2740 		case JOB_UNSOL_REQUEST: {
2741 			mutex_exit(&port->fp_mutex);
2742 			fp_handle_unsol_buf(port,
2743 			    (fc_unsol_buf_t *)job->job_private, job);
2744 			fctl_dealloc_job(job);
2745 			break;
2746 		}
2747 
2748 		case JOB_NS_CMD: {
2749 			fctl_ns_req_t *ns_cmd;
2750 
2751 			mutex_exit(&port->fp_mutex);
2752 
2753 			job->job_flags |= JOB_TYPE_FP_ASYNC;
2754 			ns_cmd = (fctl_ns_req_t *)job->job_private;
2755 			if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2756 			    ns_cmd->ns_cmd_code > NS_DA_ID) {
2757 				job->job_result = FC_BADCMD;
2758 				fctl_jobdone(job);
2759 				break;
2760 			}
2761 
2762 			if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2763 				if (ns_cmd->ns_pd != NULL) {
2764 					job->job_result = FC_BADOBJECT;
2765 					fctl_jobdone(job);
2766 					break;
2767 				}
2768 
2769 				job->job_counter = 1;
2770 
2771 				rval = fp_ns_reg(port, ns_cmd->ns_pd,
2772 				    ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2773 
2774 				if (rval != FC_SUCCESS) {
2775 					job->job_result = rval;
2776 					fctl_jobdone(job);
2777 				}
2778 				break;
2779 			}
2780 			job->job_result = FC_SUCCESS;
2781 			job->job_counter = 1;
2782 
2783 			rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2784 			if (rval != FC_SUCCESS) {
2785 				fctl_jobdone(job);
2786 			}
2787 			break;
2788 		}
2789 
2790 		case JOB_LINK_RESET: {
2791 			la_wwn_t *pwwn;
2792 			uint32_t topology;
2793 
2794 			pwwn = (la_wwn_t *)job->job_private;
2795 			ASSERT(pwwn != NULL);
2796 
2797 			topology = port->fp_topology;
2798 			mutex_exit(&port->fp_mutex);
2799 
2800 			if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2801 			    topology == FC_TOP_PRIVATE_LOOP) {
2802 				job->job_flags |= JOB_TYPE_FP_ASYNC;
2803 				rval = port->fp_fca_tran->fca_reset(
2804 				    port->fp_fca_handle, FC_FCA_LINK_RESET);
2805 				job->job_result = rval;
2806 				fp_jobdone(job);
2807 			} else {
2808 				ASSERT((job->job_flags &
2809 				    JOB_TYPE_FP_ASYNC) == 0);
2810 
2811 				if (FC_IS_TOP_SWITCH(topology)) {
2812 					rval = fp_remote_lip(port, pwwn,
2813 					    KM_SLEEP, job);
2814 				} else {
2815 					rval = FC_FAILURE;
2816 				}
2817 				if (rval != FC_SUCCESS) {
2818 					job->job_result = rval;
2819 				}
2820 				fctl_jobdone(job);
2821 			}
2822 			break;
2823 		}
2824 
2825 		default:
2826 			mutex_exit(&port->fp_mutex);
2827 			job->job_result = FC_BADCMD;
2828 			fctl_jobdone(job);
2829 			break;
2830 		}
2831 	}
2832 	/* NOTREACHED */
2833 }
2834 
2835 
2836 /*
2837  * Perform FC port bring up initialization
2838  */
2839 static int
2840 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2841 {
2842 	int		rval;
2843 	uint32_t	state;
2844 	uint32_t	src_id;
2845 	fc_lilpmap_t 	*lilp_map;
2846 
2847 	ASSERT(MUTEX_HELD(&port->fp_mutex));
2848 	ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2849 
2850 	FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2851 	    " port=%p, job=%p", port, job);
2852 
2853 	port->fp_topology = FC_TOP_UNKNOWN;
2854 	port->fp_port_id.port_id = 0;
2855 	state = FC_PORT_STATE_MASK(port->fp_state);
2856 
2857 	if (state == FC_STATE_OFFLINE) {
2858 		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2859 		job->job_result = FC_OFFLINE;
2860 		mutex_exit(&port->fp_mutex);
2861 		fctl_jobdone(job);
2862 		mutex_enter(&port->fp_mutex);
2863 		return (FC_OFFLINE);
2864 	}
2865 
2866 	if (state == FC_STATE_LOOP) {
2867 		port->fp_port_type.port_type = FC_NS_PORT_NL;
2868 		mutex_exit(&port->fp_mutex);
2869 
2870 		lilp_map = &port->fp_lilp_map;
2871 		if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2872 			job->job_result = FC_FAILURE;
2873 			fctl_jobdone(job);
2874 
2875 			FP_TRACE(FP_NHEAD1(9, rval),
2876 			    "LILP map Invalid or not present");
2877 			mutex_enter(&port->fp_mutex);
2878 			return (FC_FAILURE);
2879 		}
2880 
2881 		if (lilp_map->lilp_length == 0) {
2882 			job->job_result = FC_NO_MAP;
2883 			fctl_jobdone(job);
2884 			fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2885 			    "LILP map length zero");
2886 			mutex_enter(&port->fp_mutex);
2887 			return (FC_NO_MAP);
2888 		}
2889 		src_id = lilp_map->lilp_myalpa & 0xFF;
2890 	} else {
2891 		fc_remote_port_t	*pd;
2892 		fc_fca_pm_t		pm;
2893 		fc_fca_p2p_info_t	p2p_info;
2894 		int			pd_recepient;
2895 
2896 		/*
2897 		 * Get P2P remote port info if possible
2898 		 */
2899 		bzero((caddr_t)&pm, sizeof (pm));
2900 
2901 		pm.pm_cmd_flags = FC_FCA_PM_READ;
2902 		pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2903 		pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2904 		pm.pm_data_buf = (caddr_t)&p2p_info;
2905 
2906 		rval = port->fp_fca_tran->fca_port_manage(
2907 		    port->fp_fca_handle, &pm);
2908 
2909 		if (rval == FC_SUCCESS) {
2910 			port->fp_port_id.port_id = p2p_info.fca_d_id;
2911 			port->fp_port_type.port_type = FC_NS_PORT_N;
2912 			port->fp_topology = FC_TOP_PT_PT;
2913 			port->fp_total_devices = 1;
2914 			pd_recepient = fctl_wwn_cmp(
2915 			    &port->fp_service_params.nport_ww_name,
2916 			    &p2p_info.pwwn) < 0 ?
2917 			    PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2918 			mutex_exit(&port->fp_mutex);
2919 			pd = fctl_create_remote_port(port,
2920 			    &p2p_info.nwwn,
2921 			    &p2p_info.pwwn,
2922 			    p2p_info.d_id,
2923 			    pd_recepient, KM_NOSLEEP);
2924 			FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2925 			    " P2P port=%p pd=%p", port, pd);
2926 			mutex_enter(&port->fp_mutex);
2927 			return (FC_SUCCESS);
2928 		}
2929 		port->fp_port_type.port_type = FC_NS_PORT_N;
2930 		mutex_exit(&port->fp_mutex);
2931 		src_id = 0;
2932 	}
2933 
2934 	job->job_counter = 1;
2935 	job->job_result = FC_SUCCESS;
2936 
2937 	if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2938 	    KM_SLEEP)) != FC_SUCCESS) {
2939 		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2940 		job->job_result = FC_FAILURE;
2941 		fctl_jobdone(job);
2942 
2943 		mutex_enter(&port->fp_mutex);
2944 		if (port->fp_statec_busy <= 1) {
2945 			mutex_exit(&port->fp_mutex);
2946 			fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
2947 			    "Couldn't transport FLOGI");
2948 			mutex_enter(&port->fp_mutex);
2949 		}
2950 		return (FC_FAILURE);
2951 	}
2952 
2953 	fp_jobwait(job);
2954 
2955 	mutex_enter(&port->fp_mutex);
2956 	if (job->job_result == FC_SUCCESS) {
2957 		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2958 			mutex_exit(&port->fp_mutex);
2959 			fp_ns_init(port, job, KM_SLEEP);
2960 			mutex_enter(&port->fp_mutex);
2961 		}
2962 	} else {
2963 		if (state == FC_STATE_LOOP) {
2964 			port->fp_topology = FC_TOP_PRIVATE_LOOP;
2965 			port->fp_port_id.port_id =
2966 			    port->fp_lilp_map.lilp_myalpa & 0xFF;
2967 		}
2968 	}
2969 
2970 	FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
2971 	    port, job);
2972 
2973 	return (FC_SUCCESS);
2974 }
2975 
2976 
2977 /*
2978  * Perform ULP invocations following FC port startup
2979  */
2980 /* ARGSUSED */
2981 static void
2982 fp_startup_done(opaque_t arg, uchar_t result)
2983 {
2984 	fc_local_port_t *port = arg;
2985 
2986 	fp_attach_ulps(port, FC_CMD_ATTACH);
2987 
2988 	FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
2989 }
2990 
2991 
2992 /*
2993  * Perform ULP port attach
2994  */
2995 static void
2996 fp_ulp_port_attach(void *arg)
2997 {
2998 	fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
2999 	fc_local_port_t	 *port = att->att_port;
3000 
3001 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3002 	    " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3003 
3004 	fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3005 
3006 	if (att->att_need_pm_idle == B_TRUE) {
3007 		fctl_idle_port(port);
3008 	}
3009 
3010 	FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3011 	    " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3012 
3013 	mutex_enter(&att->att_port->fp_mutex);
3014 	att->att_port->fp_ulp_attach = 0;
3015 
3016 	port->fp_task = port->fp_last_task;
3017 	port->fp_last_task = FP_TASK_IDLE;
3018 
3019 	cv_signal(&att->att_port->fp_attach_cv);
3020 
3021 	mutex_exit(&att->att_port->fp_mutex);
3022 
3023 	kmem_free(att, sizeof (fp_soft_attach_t));
3024 }
3025 
3026 /*
3027  * Entry point to funnel all requests down to FCAs
3028  */
3029 static int
3030 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3031 {
3032 	int rval;
3033 
3034 	mutex_enter(&port->fp_mutex);
3035 	if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3036 	    (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3037 	    FC_STATE_OFFLINE))) {
3038 		/*
3039 		 * This means there is more than one state change
3040 		 * at this point of time - Since they are processed
3041 		 * serially, any processing of the current one should
3042 		 * be failed, failed and move up in processing the next
3043 		 */
3044 		cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3045 		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3046 		if (cmd->cmd_job) {
3047 			/*
3048 			 * A state change that is going to be invalidated
3049 			 * by another one already in the port driver's queue
3050 			 * need not go up to all ULPs. This will minimize
3051 			 * needless processing and ripples in ULP modules
3052 			 */
3053 			cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3054 		}
3055 		mutex_exit(&port->fp_mutex);
3056 		return (FC_STATEC_BUSY);
3057 	}
3058 
3059 	if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3060 		cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3061 		cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3062 		mutex_exit(&port->fp_mutex);
3063 
3064 		return (FC_OFFLINE);
3065 	}
3066 	mutex_exit(&port->fp_mutex);
3067 
3068 	rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3069 	if (rval != FC_SUCCESS) {
3070 		if (rval == FC_TRAN_BUSY) {
3071 			cmd->cmd_retry_interval = fp_retry_delay;
3072 			rval = fp_retry_cmd(&cmd->cmd_pkt);
3073 			if (rval == FC_FAILURE) {
3074 				cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3075 			}
3076 		}
3077 	}
3078 
3079 	return (rval);
3080 }
3081 
3082 
3083 /*
3084  * Each time a timeout kicks in, walk the wait queue, decrement the
3085  * the retry_interval, when the retry_interval becomes less than
3086  * or equal to zero, re-transport the command: If the re-transport
3087  * fails with BUSY, enqueue the command in the wait queue.
3088  *
3089  * In order to prevent looping forever because of commands enqueued
3090  * from within this function itself, save the current tail pointer
3091  * (in cur_tail) and exit the loop after serving this command.
3092  */
3093 static void
3094 fp_resendcmd(void *port_handle)
3095 {
3096 	int		rval;
3097 	fc_local_port_t	*port;
3098 	fp_cmd_t 	*cmd;
3099 	fp_cmd_t	*cur_tail;
3100 
3101 	port = port_handle;
3102 	mutex_enter(&port->fp_mutex);
3103 	cur_tail = port->fp_wait_tail;
3104 	mutex_exit(&port->fp_mutex);
3105 
3106 	while ((cmd = fp_deque_cmd(port)) != NULL) {
3107 		cmd->cmd_retry_interval -= fp_retry_ticker;
3108 		/* Check if we are detaching */
3109 		if (port->fp_soft_state &
3110 		    (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3111 			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3112 			cmd->cmd_pkt.pkt_reason = 0;
3113 			fp_iodone(cmd);
3114 		} else if (cmd->cmd_retry_interval <= 0) {
3115 			rval = cmd->cmd_transport(port->fp_fca_handle,
3116 			    &cmd->cmd_pkt);
3117 
3118 			if (rval != FC_SUCCESS) {
3119 				if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3120 					if (--cmd->cmd_retry_count) {
3121 						fp_enque_cmd(port, cmd);
3122 						if (cmd == cur_tail) {
3123 							break;
3124 						}
3125 						continue;
3126 					}
3127 					cmd->cmd_pkt.pkt_state =
3128 					    FC_PKT_TRAN_BSY;
3129 				} else {
3130 					cmd->cmd_pkt.pkt_state =
3131 					    FC_PKT_TRAN_ERROR;
3132 				}
3133 				cmd->cmd_pkt.pkt_reason = 0;
3134 				fp_iodone(cmd);
3135 			}
3136 		} else {
3137 			fp_enque_cmd(port, cmd);
3138 		}
3139 
3140 		if (cmd == cur_tail) {
3141 			break;
3142 		}
3143 	}
3144 
3145 	mutex_enter(&port->fp_mutex);
3146 	if (port->fp_wait_head) {
3147 		timeout_id_t tid;
3148 
3149 		mutex_exit(&port->fp_mutex);
3150 		tid = timeout(fp_resendcmd, (caddr_t)port,
3151 		    fp_retry_ticks);
3152 		mutex_enter(&port->fp_mutex);
3153 		port->fp_wait_tid = tid;
3154 	} else {
3155 		port->fp_wait_tid = NULL;
3156 	}
3157 	mutex_exit(&port->fp_mutex);
3158 }
3159 
3160 
3161 /*
3162  * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3163  *
3164  * Yes, as you can see below, cmd_retry_count is used here too.  That means
3165  * the retries for BUSY are less if there were transport failures (transport
3166  * failure means fca_transport failure). The goal is not to exceed overall
3167  * retries set in the cmd_retry_count (whatever may be the reason for retry)
3168  *
3169  * Return Values:
3170  *	FC_SUCCESS
3171  *	FC_FAILURE
3172  */
3173 static int
3174 fp_retry_cmd(fc_packet_t *pkt)
3175 {
3176 	fp_cmd_t *cmd;
3177 
3178 	cmd = pkt->pkt_ulp_private;
3179 
3180 	if (--cmd->cmd_retry_count) {
3181 		fp_enque_cmd(cmd->cmd_port, cmd);
3182 		return (FC_SUCCESS);
3183 	} else {
3184 		return (FC_FAILURE);
3185 	}
3186 }
3187 
3188 
3189 /*
3190  * Queue up FC packet for deferred retry
3191  */
3192 static void
3193 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
3194 {
3195 	timeout_id_t tid;
3196 
3197 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3198 
3199 #ifdef	DEBUG
3200 	fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
3201 	    "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
3202 #endif
3203 
3204 	mutex_enter(&port->fp_mutex);
3205 	if (port->fp_wait_tail) {
3206 		port->fp_wait_tail->cmd_next = cmd;
3207 		port->fp_wait_tail = cmd;
3208 	} else {
3209 		ASSERT(port->fp_wait_head == NULL);
3210 		port->fp_wait_head = port->fp_wait_tail = cmd;
3211 		if (port->fp_wait_tid == NULL) {
3212 			mutex_exit(&port->fp_mutex);
3213 			tid = timeout(fp_resendcmd, (caddr_t)port,
3214 			    fp_retry_ticks);
3215 			mutex_enter(&port->fp_mutex);
3216 			port->fp_wait_tid = tid;
3217 		}
3218 	}
3219 	mutex_exit(&port->fp_mutex);
3220 }
3221 
3222 
3223 /*
3224  * Handle all RJT codes
3225  */
3226 static int
3227 fp_handle_reject(fc_packet_t *pkt)
3228 {
3229 	int 		rval = FC_FAILURE;
3230 	uchar_t		next_class;
3231 	fp_cmd_t 	*cmd;
3232 
3233 	cmd = pkt->pkt_ulp_private;
3234 
3235 	if (pkt->pkt_reason != FC_REASON_CLASS_NOT_SUPP) {
3236 		if (pkt->pkt_reason == FC_REASON_QFULL ||
3237 		    pkt->pkt_reason == FC_REASON_LOGICAL_BSY) {
3238 			cmd->cmd_retry_interval = fp_retry_delay;
3239 			rval = fp_retry_cmd(pkt);
3240 		}
3241 
3242 		return (rval);
3243 	}
3244 
3245 	next_class = fp_get_nextclass(cmd->cmd_port,
3246 	    FC_TRAN_CLASS(pkt->pkt_tran_flags));
3247 
3248 	if (next_class == FC_TRAN_CLASS_INVALID) {
3249 		return (rval);
3250 	}
3251 	pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3252 	pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3253 
3254 	rval = fp_sendcmd(cmd->cmd_port, cmd,
3255 	    cmd->cmd_port->fp_fca_handle);
3256 
3257 	if (rval != FC_SUCCESS) {
3258 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
3259 	}
3260 
3261 	return (rval);
3262 }
3263 
3264 
3265 /*
3266  * Return the next class of service supported by the FCA
3267  */
3268 static uchar_t
3269 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3270 {
3271 	uchar_t next_class;
3272 
3273 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3274 
3275 	switch (cur_class) {
3276 	case FC_TRAN_CLASS_INVALID:
3277 		if (port->fp_cos & FC_NS_CLASS1) {
3278 			next_class = FC_TRAN_CLASS1;
3279 			break;
3280 		}
3281 		/* FALLTHROUGH */
3282 
3283 	case FC_TRAN_CLASS1:
3284 		if (port->fp_cos & FC_NS_CLASS2) {
3285 			next_class = FC_TRAN_CLASS2;
3286 			break;
3287 		}
3288 		/* FALLTHROUGH */
3289 
3290 	case FC_TRAN_CLASS2:
3291 		if (port->fp_cos & FC_NS_CLASS3) {
3292 			next_class = FC_TRAN_CLASS3;
3293 			break;
3294 		}
3295 		/* FALLTHROUGH */
3296 
3297 	case FC_TRAN_CLASS3:
3298 	default:
3299 		next_class = FC_TRAN_CLASS_INVALID;
3300 		break;
3301 	}
3302 
3303 	return (next_class);
3304 }
3305 
3306 
3307 /*
3308  * Determine if a class of service is supported by the FCA
3309  */
3310 static int
3311 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3312 {
3313 	int rval;
3314 
3315 	switch (tran_class) {
3316 	case FC_TRAN_CLASS1:
3317 		if (cos & FC_NS_CLASS1) {
3318 			rval = FC_SUCCESS;
3319 		} else {
3320 			rval = FC_FAILURE;
3321 		}
3322 		break;
3323 
3324 	case FC_TRAN_CLASS2:
3325 		if (cos & FC_NS_CLASS2) {
3326 			rval = FC_SUCCESS;
3327 		} else {
3328 			rval = FC_FAILURE;
3329 		}
3330 		break;
3331 
3332 	case FC_TRAN_CLASS3:
3333 		if (cos & FC_NS_CLASS3) {
3334 			rval = FC_SUCCESS;
3335 		} else {
3336 			rval = FC_FAILURE;
3337 		}
3338 		break;
3339 
3340 	default:
3341 		rval = FC_FAILURE;
3342 		break;
3343 	}
3344 
3345 	return (rval);
3346 }
3347 
3348 
3349 /*
3350  * Dequeue FC packet for retry
3351  */
3352 static fp_cmd_t *
3353 fp_deque_cmd(fc_local_port_t *port)
3354 {
3355 	fp_cmd_t *cmd;
3356 
3357 	ASSERT(!MUTEX_HELD(&port->fp_mutex));
3358 
3359 	mutex_enter(&port->fp_mutex);
3360 
3361 	if (port->fp_wait_head == NULL) {
3362 		/*
3363 		 * To avoid races, NULL the fp_wait_tid as
3364 		 * we are about to exit the timeout thread.
3365 		 */
3366 		port->fp_wait_tid = NULL;
3367 		mutex_exit(&port->fp_mutex);
3368 		return (NULL);
3369 	}
3370 
3371 	cmd = port->fp_wait_head;
3372 	port->fp_wait_head = cmd->cmd_next;
3373 	cmd->cmd_next = NULL;
3374 
3375 	if (port->fp_wait_head == NULL) {
3376 		port->fp_wait_tail = NULL;
3377 	}
3378 	mutex_exit(&port->fp_mutex);
3379 
3380 	return (cmd);
3381 }
3382 
3383 
3384 /*
3385  * Wait for job completion
3386  */
3387 static void
3388 fp_jobwait(job_request_t *job)
3389 {
3390 	sema_p(&job->job_port_sema);
3391 }
3392 
3393 
3394 /*
3395  * Convert FC packet state to FC errno
3396  */
3397 int
3398 fp_state_to_rval(uchar_t state)
3399 {
3400 	int count;
3401 
3402 	for (count = 0; count < sizeof (fp_xlat) /
3403 	sizeof (fp_xlat[0]); count++) {
3404 		if (fp_xlat[count].xlat_state == state) {
3405 			return (fp_xlat[count].xlat_rval);
3406 		}
3407 	}
3408 
3409 	return (FC_FAILURE);
3410 }
3411 
3412 
3413 /*
3414  * For Synchronous I/O requests, the caller is
3415  * expected to do fctl_jobdone(if necessary)
3416  *
3417  * We want to preserve at least one failure in the
3418  * job_result if it happens.
3419  *
3420  */
3421 static void
3422 fp_iodone(fp_cmd_t *cmd)
3423 {
3424 	fc_packet_t		*ulp_pkt = cmd->cmd_ulp_pkt;
3425 	job_request_t 		*job = cmd->cmd_job;
3426 	fc_remote_port_t 	*pd = cmd->cmd_pkt.pkt_pd;
3427 
3428 	ASSERT(job != NULL);
3429 	ASSERT(cmd->cmd_port != NULL);
3430 	ASSERT(&cmd->cmd_pkt != NULL);
3431 
3432 	mutex_enter(&job->job_mutex);
3433 	if (job->job_result == FC_SUCCESS) {
3434 		job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3435 	}
3436 	mutex_exit(&job->job_mutex);
3437 
3438 	if (pd) {
3439 		mutex_enter(&pd->pd_mutex);
3440 		pd->pd_flags = PD_IDLE;
3441 		mutex_exit(&pd->pd_mutex);
3442 	}
3443 
3444 	if (ulp_pkt) {
3445 		if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3446 		    FP_IS_PKT_ERROR(ulp_pkt)) {
3447 			fc_local_port_t 	*port;
3448 			fc_remote_node_t	*node;
3449 
3450 			port = cmd->cmd_port;
3451 
3452 			mutex_enter(&pd->pd_mutex);
3453 			pd->pd_state = PORT_DEVICE_INVALID;
3454 			pd->pd_ref_count--;
3455 			node = pd->pd_remote_nodep;
3456 			mutex_exit(&pd->pd_mutex);
3457 
3458 			ASSERT(node != NULL);
3459 			ASSERT(port != NULL);
3460 
3461 			if (fctl_destroy_remote_port(port, pd) == 0) {
3462 				fctl_destroy_remote_node(node);
3463 			}
3464 
3465 			ulp_pkt->pkt_pd = NULL;
3466 		}
3467 
3468 		ulp_pkt->pkt_comp(ulp_pkt);
3469 	}
3470 
3471 	fp_free_pkt(cmd);
3472 	fp_jobdone(job);
3473 }
3474 
3475 
3476 /*
3477  * Job completion handler
3478  */
3479 static void
3480 fp_jobdone(job_request_t *job)
3481 {
3482 	mutex_enter(&job->job_mutex);
3483 	ASSERT(job->job_counter > 0);
3484 
3485 	if (--job->job_counter != 0) {
3486 		mutex_exit(&job->job_mutex);
3487 		return;
3488 	}
3489 
3490 	if (job->job_ulp_pkts) {
3491 		ASSERT(job->job_ulp_listlen > 0);
3492 		kmem_free(job->job_ulp_pkts,
3493 		    sizeof (fc_packet_t *) * job->job_ulp_listlen);
3494 	}
3495 
3496 	if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3497 		mutex_exit(&job->job_mutex);
3498 		fctl_jobdone(job);
3499 	} else {
3500 		mutex_exit(&job->job_mutex);
3501 		sema_v(&job->job_port_sema);
3502 	}
3503 }
3504 
3505 
3506 /*
3507  * Try to perform shutdown of a port during a detach. No return
3508  * value since the detach should not fail because the port shutdown
3509  * failed.
3510  */
3511 static void
3512 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3513 {
3514 	int			index;
3515 	int			count;
3516 	int			flags;
3517 	fp_cmd_t 		*cmd;
3518 	struct pwwn_hash 	*head;
3519 	fc_remote_port_t 	*pd;
3520 
3521 	ASSERT(MUTEX_HELD(&port->fp_mutex));
3522 
3523 	job->job_result = FC_SUCCESS;
3524 
3525 	if (port->fp_taskq) {
3526 		/*
3527 		 * We must release the mutex here to ensure that other
3528 		 * potential jobs can complete their processing.  Many
3529 		 * also need this mutex.
3530 		 */
3531 		mutex_exit(&port->fp_mutex);
3532 		taskq_wait(port->fp_taskq);
3533 		mutex_enter(&port->fp_mutex);
3534 	}
3535 
3536 	if (port->fp_offline_tid) {
3537 		timeout_id_t tid;
3538 
3539 		tid = port->fp_offline_tid;
3540 		port->fp_offline_tid = NULL;
3541 		mutex_exit(&port->fp_mutex);
3542 		(void) untimeout(tid);
3543 		mutex_enter(&port->fp_mutex);
3544 	}
3545 
3546 	if (port->fp_wait_tid) {
3547 		timeout_id_t tid;
3548 
3549 		tid = port->fp_wait_tid;
3550 		port->fp_wait_tid = NULL;
3551 		mutex_exit(&port->fp_mutex);
3552 		(void) untimeout(tid);
3553 	} else {
3554 		mutex_exit(&port->fp_mutex);
3555 	}
3556 
3557 	/*
3558 	 * While we cancel the timeout, let's also return the
3559 	 * the outstanding requests back to the callers.
3560 	 */
3561 	while ((cmd = fp_deque_cmd(port)) != NULL) {
3562 		ASSERT(cmd->cmd_job != NULL);
3563 		cmd->cmd_job->job_result = FC_OFFLINE;
3564 		fp_iodone(cmd);
3565 	}
3566 
3567 	/*
3568 	 * Gracefully LOGO with all the devices logged in.
3569 	 */
3570 	mutex_enter(&port->fp_mutex);
3571 
3572 	for (count = index = 0; index < pwwn_table_size; index++) {
3573 		head = &port->fp_pwwn_table[index];
3574 		pd = head->pwwn_head;
3575 		while (pd != NULL) {
3576 			mutex_enter(&pd->pd_mutex);
3577 			if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3578 				count++;
3579 			}
3580 			mutex_exit(&pd->pd_mutex);
3581 			pd = pd->pd_wwn_hnext;
3582 		}
3583 	}
3584 
3585 	if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3586 		flags = job->job_flags;
3587 		job->job_flags &= ~JOB_TYPE_FP_ASYNC;
3588 	} else {
3589 		flags = 0;
3590 	}
3591 	if (count) {
3592 		job->job_counter = count;
3593 
3594 		for (index = 0; index < pwwn_table_size; index++) {
3595 			head = &port->fp_pwwn_table[