/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 RackTop Systems, Inc.
 *
 * NOT a DDI compliant Sun Fibre Channel port driver (fp)
 *
 */

#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/thread.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/promif.h>
#include <sys/nvpair.h>
#include <sys/byteorder.h>
#include <sys/scsi/scsi.h>
#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/impl/fc_fcaif.h>
#include <sys/fibre-channel/impl/fctl_private.h>
#include <sys/fibre-channel/impl/fc_portif.h>
#include <sys/fibre-channel/impl/fp.h>

/* These are defined in fctl.c! */
extern int did_table_size;
extern int pwwn_table_size;

static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops fp_ops = {
	DEVO_REV,			/* build revision */
	0,				/* reference count */
	fp_getinfo,			/* getinfo */
	nulldev,			/* identify - Obsoleted */
	nulldev,			/* probe */
	fp_attach,			/* attach */
	fp_detach,			/* detach */
	nodev,				/* reset */
	&fp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	fp_power,			/* power */
	ddi_quiesce_not_needed		/* quiesce */
};

#define	FP_VERSION		"20091123-1.101"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	/* Rev of the loadable modules system */
	&modldrv,	/* NULL terminated list of */
	NULL		/* Linkage structures */
};



static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};

struct fp_xlat {
	uchar_t		xlat_state;
	int		xlat_rval;
} fp_xlat [] = {
	{ FC_PKT_SUCCESS,	FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP,	FC_FAILURE },
	{ FC_PKT_LOCAL_RJT,	FC_FAILURE },
	{ FC_PKT_NPORT_RJT,	FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT,	FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY,	FC_PBUSY },
	{ FC_PKT_FABRIC_BSY,	FC_FBUSY },
	{ FC_PKT_LS_RJT,	FC_FAILURE },
	{ FC_PKT_BA_RJT,	FC_FAILURE },
	{ FC_PKT_TIMEOUT,	FC_FAILURE },
	{ FC_PKT_TRAN_ERROR,	FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE,	FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE,	FC_OFFLINE }
};
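
/*
 * The fp_xlat table above is typically consumed by a small linear scan
 * that translates an FCA packet completion state (pkt_state) into an
 * fctl return code. A minimal illustrative sketch (hypothetical helper
 * name, not the driver's actual lookup routine):
 *
 *	static int
 *	fp_xlat_lookup(uchar_t state)
 *	{
 *		int i;
 *
 *		for (i = 0; i < sizeof (fp_xlat) / sizeof (fp_xlat[0]);
 *		    i++) {
 *			if (fp_xlat[i].xlat_state == state)
 *				return (fp_xlat[i].xlat_rval);
 *		}
 *		return (FC_FAILURE);
 *	}
 */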

static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};
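
/*
 * fp_valid_alpas[] lists, in ascending order, the 126 arbitrated-loop
 * physical addresses that FC-AL defines as valid for NL_Ports (AL_PA
 * 0x00 is reserved for the FL_Port and is therefore not listed). A
 * membership test is a simple scan over the array; a hedged sketch
 * (hypothetical helper, not part of the original driver):
 *
 *	static int
 *	fp_is_valid_alpa(uchar_t alpa)
 *	{
 *		int i;
 *
 *		for (i = 0; i < sizeof (fp_valid_alpas); i++) {
 *			if (fp_valid_alpas[i] == alpa)
 *				return (1);
 *		}
 *		return (0);
 *	}
 */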

static struct fp_perms {
	uint16_t	fp_ioctl_cmd;
	uchar_t		fp_open_flag;
} fp_perm_list [] = {
	{ FCIO_GET_NUM_DEVS,			FP_OPEN },
	{ FCIO_GET_DEV_LIST,			FP_OPEN },
	{ FCIO_GET_SYM_PNAME,			FP_OPEN },
	{ FCIO_GET_SYM_NNAME,			FP_OPEN },
	{ FCIO_SET_SYM_PNAME,			FP_EXCL },
	{ FCIO_SET_SYM_NNAME,			FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS,			FP_OPEN },
	{ FCIO_DEV_LOGIN,			FP_EXCL },
	{ FCIO_DEV_LOGOUT,			FP_EXCL },
	{ FCIO_GET_STATE,			FP_OPEN },
	{ FCIO_DEV_REMOVE,			FP_EXCL },
	{ FCIO_GET_FCODE_REV,			FP_OPEN },
	{ FCIO_GET_FW_REV,			FP_OPEN },
	{ FCIO_GET_DUMP_SIZE,			FP_OPEN },
	{ FCIO_FORCE_DUMP,			FP_EXCL },
	{ FCIO_GET_DUMP,			FP_OPEN },
	{ FCIO_GET_TOPOLOGY,			FP_OPEN },
	{ FCIO_RESET_LINK,			FP_EXCL },
	{ FCIO_RESET_HARD,			FP_EXCL },
	{ FCIO_RESET_HARD_CORE,			FP_EXCL },
	{ FCIO_DIAG,				FP_OPEN },
	{ FCIO_NS,				FP_EXCL },
	{ FCIO_DOWNLOAD_FW,			FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE,			FP_EXCL },
	{ FCIO_LINK_STATUS,			FP_OPEN },
	{ FCIO_GET_HOST_PARAMS,			FP_OPEN },
	{ FCIO_GET_NODE_ID,			FP_OPEN },
	{ FCIO_SET_NODE_ID,			FP_EXCL },
	{ FCIO_SEND_NODE_ID,			FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES,		FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS,		FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES,		FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS,		FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST,		FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT,		FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES,		FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT,		FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES,	FP_OPEN }
};
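
/*
 * fp_perm_list[] pairs each supported FCIO ioctl with the open mode it
 * requires: FP_OPEN entries work on any open of the devctl node, while
 * FP_EXCL entries are honored only on an exclusive (FEXCL) open. A
 * hedged sketch of the permission test (hypothetical helper; the real
 * check lives in the ioctl path and matches against port->fp_flag):
 *
 *	static int
 *	fp_ioctl_allowed(uint16_t fcio_cmd, uchar_t open_flag)
 *	{
 *		int i, n = sizeof (fp_perm_list) / sizeof (fp_perm_list[0]);
 *
 *		for (i = 0; i < n; i++) {
 *			if (fp_perm_list[i].fp_ioctl_cmd == fcio_cmd) {
 *				return ((fp_perm_list[i].fp_open_flag &
 *				    open_flag) != 0);
 *			}
 *		}
 *		return (0);
 *	}
 */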

static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};


#ifdef	_LITTLE_ENDIAN
#define	MAKE_BE_32(x)	{					\
		uint32_t	*ptr1, i;			\
		ptr1 = (uint32_t *)(x);				\
		for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
			*ptr1 = BE_32(*ptr1);			\
			ptr1++;					\
		}						\
	}
#else
#define	MAKE_BE_32(x)
#endif
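
/*
 * Illustrative use of MAKE_BE_32 (a sketch, not lifted from this
 * driver): given an object composed entirely of uint32_t words, the
 * macro byte-swaps every word in place on little-endian hosts and
 * compiles away entirely on big-endian ones:
 *
 *	struct two_words {
 *		uint32_t	w0;
 *		uint32_t	w1;
 *	} tw = { 0x11223344, 0x55667788 };
 *
 *	MAKE_BE_32(&tw);	(on x86, tw.w0 is now 0x44332211)
 *
 * Note that sizeof (*(x)) must be a multiple of sizeof (uint32_t) for
 * the loop bound to cover the whole object.
 */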

static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
static uint32_t fp_options = 0;

static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances. Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

static clock_t fp_retry_ticks;
static clock_t fp_offline_ticks;

static int fp_retry_ticker;
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

static int fp_log_size = FP_LOG_SIZE;
static int fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t *fp_logq = NULL;

int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);


/*
 * Perform global initialization
 */
int
_init(void)
{
	int ret;

	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
	    sizeof (struct fc_local_port), 8)) != 0) {
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&fp_driver_softstate);
		return (ret);
	}

	fp_logq = fc_trace_alloc_logq(fp_log_size);

	if ((ret = mod_install(&modlinkage)) != 0) {
		fc_trace_free_logq(fp_logq);
		ddi_soft_state_fini(&fp_driver_softstate);
		scsi_hba_fini(&modlinkage);
	}

	return (ret);
}


/*
 * Prepare for driver unload
 */
int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modlinkage)) == 0) {
		fc_trace_free_logq(fp_logq);
		ddi_soft_state_fini(&fp_driver_softstate);
		scsi_hba_fini(&modlinkage);
	}

	return (ret);
}


/*
 * Request mod_info() to handle all cases
 */
int
_info(struct modinfo *modinfo)
{
	return (mod_info(&modlinkage, modinfo));
}


/*
 * fp_attach:
 *
 * The respective cmd handlers take care of performing
 * ULP related invocations
 */
static int
fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int rval;

	/*
	 * We check the value of fp_offline_ticker at this
	 * point. The variable is global for the driver and
	 * not specific to an instance.
	 *
	 * If there is no user-defined value found in /etc/system
	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
	 * The minimum setting for this offline timeout according
	 * to the FC-FS2 standard (Fibre Channel Framing and
	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
	 *
	 * We do not recommend setting the value to less than 10
	 * seconds (RA_TOV) or more than 90 seconds. If this
	 * variable is greater than 90 seconds then drivers above
	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
	 */

	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
	    FP_OFFLINE_TICKER);

	if ((fp_offline_ticker < 10) ||
	    (fp_offline_ticker > 90)) {
		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
		    "%d second(s). This is outside the "
		    "recommended range of 10..90 seconds",
		    fp_offline_ticker);
	}

	/*
	 * Tick every second when there are commands to retry.
	 * It should tick at the least granular value of pkt_timeout
	 * (which is one second)
	 */
	fp_retry_ticker = 1;

	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);

	switch (cmd) {
	case DDI_ATTACH:
		rval = fp_attach_handler(dip);
		break;

	case DDI_RESUME:
		rval = fp_resume_handler(dip);
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}
	return (rval);
}

/*
 * fp_detach:
 *
 * If a ULP fails to handle the cmd request, the converse of the
 * cmd is invoked for the ULPs that previously succeeded the cmd
 * request.
 */
static int
fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;
	fc_attach_cmd_t	converse;
	uint8_t		cnt;

	if ((port = ddi_get_soft_state(fp_driver_softstate,
	    ddi_get_instance(dip))) == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	if (port->fp_ulp_attach) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		if (port->fp_task != FP_TASK_IDLE) {
			mutex_exit(&port->fp_mutex);
			return (DDI_FAILURE);
		}

		/* Let's attempt to quit the job handler gracefully */
		port->fp_soft_state |= FP_DETACH_INPROGRESS;

		mutex_exit(&port->fp_mutex);
		converse = FC_CMD_ATTACH;
		if (fctl_detach_ulps(port, FC_CMD_DETACH,
		    &modlinkage) != FC_SUCCESS) {
			mutex_enter(&port->fp_mutex);
			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
			mutex_exit(&port->fp_mutex);
			rval = DDI_FAILURE;
			break;
		}

		mutex_enter(&port->fp_mutex);
		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
		    cnt++) {
			mutex_exit(&port->fp_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&port->fp_mutex);
		}

		if (port->fp_job_head) {
			mutex_exit(&port->fp_mutex);
			rval = DDI_FAILURE;
			break;
		}
		mutex_exit(&port->fp_mutex);

		rval = fp_detach_handler(port);
		break;

	case DDI_SUSPEND:
		mutex_exit(&port->fp_mutex);
		converse = FC_CMD_RESUME;
		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
		    &modlinkage) != FC_SUCCESS) {
			rval = DDI_FAILURE;
			break;
		}
		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
			(void) callb_generic_cpr(&port->fp_cpr_info,
			    CB_CODE_CPR_RESUME);
		}
		break;

	default:
		mutex_exit(&port->fp_mutex);
		break;
	}

	/*
	 * Use softint to perform reattach. Mark fp_ulp_attach so we
	 * don't attempt to do this repeatedly on behalf of some persistent
	 * caller.
	 */
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_ulp_attach = 1;

		/*
		 * If the port is in the low power mode then there is a
		 * possibility that the FCA too could be in low power mode.
		 * Try to raise the power before calling attach ulps.
		 */

		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
			mutex_exit(&port->fp_mutex);
			(void) pm_raise_power(port->fp_port_dip,
			    FP_PM_COMPONENT, FP_PM_PORT_UP);
		} else {
			mutex_exit(&port->fp_mutex);
		}


		fp_attach_ulps(port, converse);

		mutex_enter(&port->fp_mutex);
		while (port->fp_ulp_attach) {
			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
		}

		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;

		/*
		 * Mark state as detach failed so asynchronous ULP attach
		 * events (downstream, not the ones we're initiating with
		 * the call to fp_attach_ulps) are not honored. We're
		 * really still in pending detach.
		 */
		port->fp_soft_state |= FP_DETACH_FAILED;

		mutex_exit(&port->fp_mutex);
	}

	return (rval);
}


/*
 * fp_getinfo:
 *	Given the device number, return either the
 *	dev_info_t pointer or the instance number.
 */

/* ARGSUSED */
static int
fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int		rval;
	minor_t		instance;
	fc_local_port_t	*port;

	rval = DDI_SUCCESS;
	instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((port = ddi_get_soft_state(fp_driver_softstate,
		    instance)) == NULL) {
			rval = DDI_FAILURE;
			break;
		}
		*result = (void *)port->fp_port_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}


/*
 * Entry point for power up and power down request from kernel
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * The PM framework goofed up. We don't have any
			 * PM components. Let's never go down.
			 */
			mutex_exit(&port->fp_mutex);
			break;

		}

		if (port->fp_ulp_attach) {
			/* We shouldn't let the power go down */
			mutex_exit(&port->fp_mutex);
			break;
		}

		/*
		 * Not a whole lot to do if we are detaching
		 */
		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
			port->fp_pm_level = FP_PM_PORT_DOWN;
			mutex_exit(&port->fp_mutex);
			rval = DDI_SUCCESS;
			break;
		}

		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
			port->fp_pm_level = FP_PM_PORT_DOWN;

			rval = fp_power_down(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_UP;
				ASSERT(!(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN));
			} else {
				ASSERT(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN);
			}
		}
		mutex_exit(&port->fp_mutex);
		break;

	default:
		break;
	}

	return (rval);
}


/*
 * Open FC port devctl node
 */
static int
fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	int		instance;
	fc_local_port_t	*port;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/*
	 * This is not a toy to play with. Allow only powerful
	 * users (hopefully knowledgeable) to access the port
	 * (A hacker potentially could download a sick binary
	 * file into FCA)
	 */
	if (drv_priv(credp)) {
		return (EPERM);
	}

	instance = (int)getminor(*devp);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if (port->fp_flag & FP_EXCL) {
		/*
		 * It is already open for exclusive access.
		 * So shut the door on this caller.
		 */
		mutex_exit(&port->fp_mutex);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		if (port->fp_flag & FP_OPEN) {
			/*
			 * Exclusive operation not possible
			 * as it is already opened
			 */
			mutex_exit(&port->fp_mutex);
			return (EBUSY);
		}
		port->fp_flag |= FP_EXCL;
	}
	port->fp_flag |= FP_OPEN;
	mutex_exit(&port->fp_mutex);

	return (0);
}

/*
 * The driver close entry point is called on the last close()
 * of a device. So it is perfectly alright to just clobber the
 * open flag and reset it to idle (instead of having to reset
 * each flag bit). For any confusion, check out close(9E).
 */

/* ARGSUSED */
static int
fp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	int		instance;
	fc_local_port_t	*port;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENODEV);
	}
	port->fp_flag = FP_IDLE;
	mutex_exit(&port->fp_mutex);

	return (0);
}

/*
 * Handle IOCTL requests
 */

/* ARGSUSED */
static int
fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
{
	int		instance;
	int		ret = 0;
	fcio_t		fcio;
	fc_local_port_t	*port;

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	mutex_exit(&port->fp_mutex);

	/* this will raise power if necessary */
	ret = fctl_busy_port(port);
	if (ret != 0) {
		return (ret);
	}

	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);


	switch (cmd) {
	case FCIO_CMD: {
#ifdef	_MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32: {
			struct fcio32 fcio32;

			if (ddi_copyin((void *)data, (void *)&fcio32,
			    sizeof (struct fcio32), mode)) {
				ret = EFAULT;
				break;
			}
			fcio.fcio_xfer = fcio32.fcio_xfer;
			fcio.fcio_cmd = fcio32.fcio_cmd;
			fcio.fcio_flags = fcio32.fcio_flags;
			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
			fcio.fcio_ibuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
			fcio.fcio_obuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_obuf;
			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
			fcio.fcio_abuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_abuf;
			fcio.fcio_errno = fcio32.fcio_errno;
			break;
		}

		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)data, (void *)&fcio,
			    sizeof (fcio_t), mode)) {
				ret = EFAULT;
			}
			break;
		}
#else	/* _MULTI_DATAMODEL */
		if (ddi_copyin((void *)data, (void *)&fcio,
		    sizeof (fcio_t), mode)) {
			ret = EFAULT;
			break;
		}
#endif	/* _MULTI_DATAMODEL */
		if (!ret) {
			ret = fp_fciocmd(port, data, mode, &fcio);
		}
		break;
	}

	default:
		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
		    mode, credp, rval);
	}

	fctl_idle_port(port);

	return (ret);
}

/*
 * Init Symbolic Port Name and Node Name
 * LV will try to get symbolic names from the FCA driver and register
 * them with the name server; if LV fails to get them, LV will register
 * its default symbolic names with the name server.
 * The default symbolic node name format is:
 *	<hostname>:<hba driver name>(instance)
 * The default symbolic port name format is:
 *	<fp path name>
 */
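/*
 * For example (hypothetical host and HBA, purely illustrative): a host
 * named "host1" whose fp instance sits on instance 0 of the "qlc" FCA
 * driver would get the default symbolic node name "host1:qlc0", and a
 * default symbolic port name equal to whatever ddi_pathname() returns
 * for the fp dev_info node, e.g. something of the form
 * "/pci@0,0/.../fp@0,0". Both names are truncated to
 * FCHBA_SYMB_NAME_LEN if necessary.
 */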
static void
fp_init_symbolic_names(fc_local_port_t *port)
{
	const char	*vendorname = ddi_driver_name(port->fp_fca_dip);
	char		*sym_name;
	char		fcaname[50] = {0};
	int		hostnlen, fcanlen;

	if (port->fp_sym_node_namelen == 0) {
		hostnlen = strlen(utsname.nodename);
		(void) snprintf(fcaname, sizeof (fcaname),
		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
		fcanlen = strlen(fcaname);

		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
		port->fp_sym_node_namelen = strlen(sym_name);
		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_node_name, sym_name,
		    port->fp_sym_node_namelen);
		kmem_free(sym_name, hostnlen + fcanlen + 2);
	}

	if (port->fp_sym_port_namelen == 0) {
		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) ddi_pathname(port->fp_port_dip, pathname);
		port->fp_sym_port_namelen = strlen(pathname);
		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_port_name, pathname,
		    port->fp_sym_port_namelen);
		kmem_free(pathname, MAXPATHLEN);
	}
}


/*
 * Perform port attach
 */
static int
fp_attach_handler(dev_info_t *dip)
{
	int		rval;
	int		instance;
	int		port_num;
	int		port_len;
	char		name[30];
	char		i_pwwn[17];
	fp_cmd_t	*pkt;
	uint32_t	ub_count;
	fc_local_port_t	*port;
	job_request_t	*job;
	fc_local_port_t	*phyport = NULL;
	int		portpro1;
	char		pwwn[17], nwwn[17];

	instance = ddi_get_instance(dip);
	port_len = sizeof (port_num);
	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
	    (caddr_t)&port_num, &port_len);
	if (rval != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
		    instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
		    instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
		    " point minor node", instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
		    instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}
	port = ddi_get_soft_state(fp_driver_softstate, instance);

	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);

	port->fp_instance = instance;
	port->fp_ulp_attach = 1;
	port->fp_port_num = port_num;
	port->fp_verbose = fp_verbosity;
	port->fp_options = fp_options;

	port->fp_fca_dip = ddi_get_parent(dip);
	port->fp_port_dip = dip;
	port->fp_fca_tran = (fc_fca_tran_t *)
	    ddi_get_driver_private(port->fp_fca_dip);

	port->fp_task = port->fp_last_task = FP_TASK_IDLE;

	/*
	 * Init the starting value of fp_rscn_count. Note that if
	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
	 * actual # of RSCNs will be (fp_rscn_count - 1)
	 */
	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;

	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);

	(void) sprintf(name, "fp%d_cache", instance);

	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "phyport-instance", -1)) != -1) {
		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
		port->fp_npiv_type = FC_NPIV_PORT;
	}

	/*
	 * Allocate the pool of fc_packet_t structs to be used with
	 * this fp instance.
	 */
	port->fp_pkt_cache = kmem_cache_create(name,
	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
	    NULL, 0);
	port->fp_out_fpcmds = 0;
	if (port->fp_pkt_cache == NULL) {
		goto cache_alloc_failed;
	}


	/*
	 * Allocate the d_id and pwwn hash tables for all remote ports
	 * connected to this local port.
	 */
	port->fp_did_table = kmem_zalloc(did_table_size *
	    sizeof (struct d_id_hash), KM_SLEEP);

	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
	    sizeof (struct pwwn_hash), KM_SLEEP);

	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
	    MINCLSYSPRI, 1, 16, 0);

	/* Indicate that we don't have the PM components yet */
	port->fp_soft_state |= FP_SOFT_NO_PMCOMP;

	/*
	 * Bind the callbacks with the FCA driver. This will open the gate
	 * for asynchronous callbacks, so after this call the fp_mutex
	 * must be held when updating the fc_local_port_t struct.
	 *
	 * This is done _before_ setting up the job thread so we can avoid
	 * cleaning up after the thread_create() in the error path. This
	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
	 */
	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		goto bind_callbacks_failed;
	}

	if (phyport) {
		mutex_enter(&phyport->fp_mutex);
		if (phyport->fp_port_next) {
			phyport->fp_port_next->fp_port_prev = port;
			port->fp_port_next = phyport->fp_port_next;
			phyport->fp_port_next = port;
			port->fp_port_prev = phyport;
		} else {
			phyport->fp_port_next = port;
			phyport->fp_port_prev = port;
			port->fp_port_next = phyport;
			port->fp_port_prev = phyport;
		}
		mutex_exit(&phyport->fp_mutex);
	}

	/*
	 * Init Symbolic Names
	 */
	fp_init_symbolic_names(port);

	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
	    KM_SLEEP, NULL);

	if (pkt == NULL) {
		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
		    instance);
		goto alloc_els_packet_failed;
	}

	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
	    v.v_maxsyspri - 2);

	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
	    i_pwwn) != DDI_PROP_SUCCESS) {
		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
		    "fp(%d): Updating 'initiator-port' property"
		    " on fp dev_info node failed", instance);
	}

	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
	    i_pwwn) != DDI_PROP_SUCCESS) {
		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
		    "fp(%d): Updating 'initiator-node' property"
		    " on fp dev_info node failed", instance);
	}

	mutex_enter(&port->fp_mutex);
	port->fp_els_resp_pkt = pkt;
	mutex_exit(&port->fp_mutex);

	/*
	 * Determine the count of unsolicited buffers this FCA can support
	 */
	fp_retrieve_caps(port);

	/*
	 * Allocate unsolicited buffer tokens
	 */
	if (port->fp_ub_count) {
		ub_count = port->fp_ub_count;
		port->fp_ub_tokens = kmem_zalloc(ub_count *
		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
		/*
		 * Do not fail the attach if unsolicited buffer allocation
		 * fails; Just try to get along with whatever the FCA can do.
		 */
		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
		    FC_SUCCESS || ub_count != port->fp_ub_count) {
			cmn_err(CE_WARN, "fp(%d): failed to allocate "
			    " Unsolicited buffers. proceeding with attach...",
			    instance);
			kmem_free(port->fp_ub_tokens,
			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
			port->fp_ub_tokens = NULL;
		}
	}

	fp_load_ulp_modules(dip, port);

	/*
	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-hardware-state", "needs-suspend-resume",
	    strlen("needs-suspend-resume") + 1);

	/*
	 * fctl maintains a list of all port handles, so
	 * help fctl add this one to its list now.
	 */
	mutex_enter(&port->fp_mutex);
	fctl_add_port(port);

	/*
	 * If a state change is already in progress, set the bind state to
	 * OFFLINE as well, so further state change callbacks into ULPs
	 * will pass the appropriate states
	 */
	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
	    port->fp_statec_busy) {
		port->fp_bind_state = FC_STATE_OFFLINE;
		mutex_exit(&port->fp_mutex);

		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
	} else {
		/*
		 * Without dropping the mutex, ensure that the port
		 * startup happens ahead of state change callback
		 * processing
		 */
		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

		port->fp_last_task = port->fp_task;
		port->fp_task = FP_TASK_PORT_STARTUP;

		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
		    fp_startup_done, (opaque_t)port, KM_SLEEP);

		port->fp_job_head = port->fp_job_tail = job;

		cv_signal(&port->fp_cv);

		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&port->fp_mutex);
	while (port->fp_ulp_attach) {
		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
	}
	mutex_exit(&port->fp_mutex);

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", fp_pm_comps,
	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
	    DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
		    " components property, PM disabled on this port.");
		mutex_enter(&port->fp_mutex);
		port->fp_pm_level = FP_PM_PORT_UP;
		mutex_exit(&port->fp_mutex);
	} else {
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
			    " power level");
			mutex_enter(&port->fp_mutex);
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
		}

		/*
		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
		 * the call to pm_raise_power. The PM framework can't
		 * handle multiple threads calling into it during attach.
		 */

		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
		mutex_exit(&port->fp_mutex);
	}

	ddi_report_dev(dip);

	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

	return (DDI_SUCCESS);

	/*
	 * Unwind any/all preceding allocations in the event of an error.
	 */

alloc_els_packet_failed:

	if (port->fp_fca_handle != NULL) {
		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
		port->fp_fca_handle = NULL;
	}

	if (port->fp_ub_tokens != NULL) {
		(void) fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens);
		kmem_free(port->fp_ub_tokens,
		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_els_resp_pkt != NULL) {
		fp_free_pkt(port->fp_els_resp_pkt);
		port->fp_els_resp_pkt = NULL;
	}

bind_callbacks_failed:

	if (port->fp_taskq != NULL) {
		taskq_destroy(port->fp_taskq);
	}

	if (port->fp_pwwn_table != NULL) {
		kmem_free(port->fp_pwwn_table,
		    pwwn_table_size * sizeof (struct pwwn_hash));
		port->fp_pwwn_table = NULL;
	}

	if (port->fp_did_table != NULL) {
		kmem_free(port->fp_did_table,
		    did_table_size * sizeof (struct d_id_hash));
		port->fp_did_table = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
		port->fp_pkt_cache = NULL;
	}

cache_alloc_failed:

	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	mutex_destroy(&port->fp_mutex);
	ddi_remove_minor_node(port->fp_port_dip, NULL);
	ddi_soft_state_free(fp_driver_softstate, instance);
	ddi_prop_remove_all(dip);

	return (DDI_FAILURE);
}


/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
	int		rval;
	fc_local_port_t	*port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port
	 * thread. Furthermore, all the discovery is handled in callback
	 * mode (not polled mode); in a specific case such as this, the
	 * discovery is required to happen in polled mode. The easiest way
	 * out is to bail out the port thread and get started. Come back
	 * and fix this to do on-demand discovery initiated by ULPs. ULPs
	 * such as FCP will do on-demand discovery during pre-power-up
	 * busctl handling, which will only be possible when SCSA provides
	 * a new HBA vector for sending down the PM busctl requests.
	 */
	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

	rval = fp_resume_all(port, FC_CMD_RESUME);
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		(void) callb_generic_cpr(&port->fp_cpr_info,
		    CB_CODE_CPR_CHKPT);
	}

	return (rval);
}

/*
 * Perform FC Port power on initialization
 */
static int
fp_power_up(fc_local_port_t *port)
{
	int	rval;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);

	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;

	mutex_exit(&port->fp_mutex);

	rval = fp_resume_all(port, FC_CMD_POWER_UP);
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
	} else {
		mutex_enter(&port->fp_mutex);
	}

	return (rval);
}


/*
 * It is important to note that the power may possibly be removed between
 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
 * FC port hardware would have gone through an OFFLINE to ONLINE transition
 * (hardware state). In this case, the port driver may need to rediscover the
 * topology, perform LOGINs, register with the name server again and perform
 * any such port initialization procedures. To perform LOGINs, the driver could
 * use the port device handle to see if a LOGIN needs to be performed and use
 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
 * or removed) which will be reflected in the map the ULPs will see.
 */
static int
fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
{

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	/*
	 * If there are commands queued for delayed retry, instead of
	 * working the hard way to figure out which ones are good for
	 * restart and which ones are not (ELSs are definitely not good,
	 * as the port will have to go through a new spin of rediscovery
	 * now), just flush them out.
	 */
	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
		fp_cmd_t	*cmd;

		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;

		mutex_exit(&port->fp_mutex);
		while ((cmd = fp_deque_cmd(port)) != NULL) {
			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
			fp_iodone(cmd);
		}
		mutex_enter(&port->fp_mutex);
	}

	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
		    port->fp_dev_count) {
			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
			port->fp_offline_tid = timeout(fp_offline_timeout,
			    (caddr_t)port, fp_offline_ticks);
		}
		if (port->fp_job_head) {
			cv_signal(&port->fp_cv);
		}
		mutex_exit(&port->fp_mutex);
		fctl_attach_ulps(port, cmd, &modlinkage);
	} else {
		struct job_request *job;

		/*
		 * If an OFFLINE timer was running at the time of
		 * suspending, there is no need to restart it as
		 * the port is ONLINE now.
		 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *	+	Grab the port driver mutex, check if the unsolicited
 *		callback is currently under processing. If true, fail
 *		the DDI_DETACH request by printing a message; If false
 *		mark the DDI_DETACH as under progress, so that any
 *		further unsolicited callbacks get bounced.
 *	+	Perform PRLO/LOGO if necessary, cleanup all the data
 *		structures.
 *	+	Get the job_handler thread to gracefully exit.
 *	+	Unregister callbacks with the FCA port.
 *	+	Now that some peace is found, notify all the ULPs of
 *		DDI_DETACH request (using ulp_port_detach entry point)
 *	+	Free all mutexes, semaphores, conditional variables.
 *	+	Free the soft state, return success.
 *
 * Important considerations:
 *	Port driver de-registers state change and unsolicited
 *	callbacks before taking up the task of notifying ULPs
 *	and performing PRLO and LOGOs.
 *
 *	A port may go offline at the time PRLO/LOGO is being
 *	requested. It is expected of all FCA drivers to fail
 *	such requests either immediately with a FC_OFFLINE
 *	return code to fc_fca_transport() or return the packet
 *	asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t	*job;
	uint32_t	delay_count;
	fc_orphan_t	*orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands,
		 * because sd/ssd/scsi_vhci should have finished all their
		 * commands, and fcp/fcip/fcsm should have finished all
		 * their commands.
		 *
		 * It seems that all fp internal commands are asynchronous
		 * now.
		 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us. In theory the fp driver could even be
	 * modunloaded even though the job thread isn't done executing,
	 * and there is no simple way to close that window without
	 * creating another race condition.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}

/*
 * Steps to perform DDI_SUSPEND operation on a FC port
 *
 *	- If already suspended return DDI_FAILURE
 *	- If already power-suspended return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	  in progress, throw a warning message, return DDI_FAILURE
 *	- Cancel timeouts
 *	- SUSPEND the job_handler thread (means do nothing as it is
 *	  taken care of by the CPR framework)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t	delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console)
	 *
	 * Busy waiting for a short interval (500 millisecond ?) to see
	 * if the callback processing completes may be another idea. Since
	 * most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of a FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Power down request followed by a DDI_SUSPEND should
	 * never happen; If it does return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, So return DDI_SUCCESS,
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	fp_suspend_all(port);

	return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int			index;
	struct pwwn_hash	*head;
	fc_remote_port_t	*pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	if (port->fp_wait_tid != 0) {
		timeout_id_t	tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	if (port->fp_offline_tid) {
		timeout_id_t	tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}
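
/*
 * A note on the untimeout() pattern above (general DDI practice, not
 * something stated elsewhere in this file): fp_mutex is dropped around
 * each untimeout(9F) call because untimeout() blocks until any
 * concurrently running timeout handler completes, and such a handler
 * may itself be trying to acquire fp_mutex; calling untimeout() with
 * the lock held could therefore deadlock. The timeout id is
 * snapshotted and cleared under the lock first, so the callback cannot
 * be cancelled twice.
 */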


/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both command and responses.
 * Most of the ELSs used have both command and responses so it is strongly
 * desired to move them to cache constructor routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
	int		(*cb) (caddr_t);
	fc_packet_t	*pkt;
	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
	fc_local_port_t	*port = (fc_local_port_t *)cdarg;

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd->cmd_next = NULL;
	cmd->cmd_flags = 0;
	cmd->cmd_dflags = 0;
	cmd->cmd_job = NULL;
	cmd->cmd_port = port;
	pkt = &cmd->cmd_pkt;

	if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
		if (ddi_dma_alloc_handle(port->fp_fca_dip,
		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
		    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
			return (-1);
		}

		if (ddi_dma_alloc_handle(port->fp_fca_dip,
		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
		    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
			ddi_dma_free_handle(&pkt->pkt_cmd_dma);
			return (-1);
		}
	} else {
		pkt->pkt_cmd_dma = 0;
		pkt->pkt_resp_dma = 0;
	}

	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
	    pkt->pkt_data_cookie_cnt = 0;
	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
	    pkt->pkt_data_cookie = NULL;
	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

	return (0);
}

/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
	fc_packet_t	*pkt;

	pkt = &cmd->cmd_pkt;
	if (pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt->pkt_resp_dma) {
		ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}
}

1935 /*
1936 * Packet allocation for ELS and any other port driver commands
1937 *
1938 * Some ELSs like FLOGI and PLOGI are critical for topology and
1939 * device discovery, and a system's inability to allocate memory
1940 * or DVMA resources while performing one of these critical ELSs
1941 * causes serious problems. While memory allocation failures are
1942 * rare, DVMA resource failures are common as applications grow
1943 * more and more demanding on large servers. So it is desirable
1944 * to have framework support for reserving a fragment of DVMA.
1945 * Until this is fixed the correct way, the penalty is severe
1946 * whenever a LIP happens while DVMA resources are completely
1947 * drained - so an attempt is made to KM_SLEEP while requesting
1948 * these resources, in the hope that the requests won't hang
1949 * forever.
1950 *
1951 * The fc_remote_port_t argument is stored into the pkt_pd field in the
1952 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
1953 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1954 * If there is no fc_remote_port_t associated with the fc_packet_t, then
1955 * fp_alloc_pkt() must be called with pd set to NULL.
1956 *
1957 * fp/fctl will reuse an fp_cmd_t in places and change pkt_cmdlen/rsplen
1958 * when doing so; this is really a design fault. It is harmless for physical
1959 * FCAs, but it can cause a memory leak or panic for virtual FCAs like fcoei.
1960 *
1961 * For FCAs that don't support DMA, such as fcoei, we will use
1962 * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
1963 */
1964
1965 static fp_cmd_t *
1966 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1967 fc_remote_port_t *pd)
1968 {
1969 int rval;
1970 ulong_t real_len;
1971 fp_cmd_t *cmd;
1972 fc_packet_t *pkt;
1973 int (*cb) (caddr_t);
1974 ddi_dma_cookie_t pkt_cookie;
1975 ddi_dma_cookie_t *cp;
1976 uint32_t cnt;
1977
1978 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1979
1980 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1981
1982 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1983 if (cmd == NULL) {
1984 return (cmd);
1985 }
1986
1987 cmd->cmd_ulp_pkt = NULL;
1988 cmd->cmd_flags = 0;
1989 pkt = &cmd->cmd_pkt;
1990 ASSERT(cmd->cmd_dflags == 0);
1991
1992 pkt->pkt_datalen = 0;
1993 pkt->pkt_data = NULL;
1994 pkt->pkt_state = 0;
1995 pkt->pkt_action = 0;
1996 pkt->pkt_reason = 0;
1997 pkt->pkt_expln = 0;
1998 pkt->pkt_cmd = NULL;
1999 pkt->pkt_resp = NULL;
2000 pkt->pkt_fctl_rsvd1 = NULL;
2001 pkt->pkt_fctl_rsvd2 = NULL;
2002
2003 /*
2004 * Init pkt_pd with the given pointer; this must be done _before_
2005 * the call to fc_ulp_init_packet().
2006 */
2007 pkt->pkt_pd = pd;
2008
2009 /* Now call the FCA driver to init its private, per-packet fields */
2010 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
2011 goto alloc_pkt_failed;
2012 }
2013
2014 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2015 ASSERT(pkt->pkt_cmd_dma != NULL);
2016
2017 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2018 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2019 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2020 &pkt->pkt_cmd_acc);
2021
2022 if (rval != DDI_SUCCESS) {
2023 goto alloc_pkt_failed;
2024 }
2025 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2026
2027 if (real_len < cmd_len) {
2028 goto alloc_pkt_failed;
2029 }
2030
2031 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2032 pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2033 DDI_DMA_CONSISTENT, cb, NULL,
2034 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2035
2036 if (rval != DDI_DMA_MAPPED) {
2037 goto alloc_pkt_failed;
2038 }
2039
2040 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2041
2042 if (pkt->pkt_cmd_cookie_cnt >
2043 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2044 goto alloc_pkt_failed;
2045 }
2046
2047 ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2048
2049 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2050 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2051 KM_NOSLEEP);
2052
2053 if (cp == NULL) {
2054 goto alloc_pkt_failed;
2055 }
2056
2057 *cp = pkt_cookie;
2058 cp++;
2059 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2060 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2061 *cp = pkt_cookie;
2062 }
2063 } else if (cmd_len != 0) {
2064 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP);
2065 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len;
2066 }
2067
2068 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2069 ASSERT(pkt->pkt_resp_dma != NULL);
2070
2071 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2072 port->fp_fca_tran->fca_acc_attr,
2073 DDI_DMA_CONSISTENT, cb, NULL,
2074 (caddr_t *)&pkt->pkt_resp, &real_len,
2075 &pkt->pkt_resp_acc);
2076
2077 if (rval != DDI_SUCCESS) {
2078 goto alloc_pkt_failed;
2079 }
2080 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2081
2082 if (real_len < resp_len) {
2083 goto alloc_pkt_failed;
2084 }
2085
2086 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2087 pkt->pkt_resp, real_len, DDI_DMA_READ |
2088 DDI_DMA_CONSISTENT, cb, NULL,
2089 &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2090
2091 if (rval != DDI_DMA_MAPPED) {
2092 goto alloc_pkt_failed;
2093 }
2094
2095 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2096
2097 if (pkt->pkt_resp_cookie_cnt >
2098 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2099 goto alloc_pkt_failed;
2100 }
2101
2102 ASSERT(pkt->pkt_resp_cookie_cnt != 0);
2103
2104 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2105 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2106 KM_NOSLEEP);
2107
2108 if (cp == NULL) {
2109 goto alloc_pkt_failed;
2110 }
2111
2112 *cp = pkt_cookie;
2113 cp++;
2114 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2115 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2116 *cp = pkt_cookie;
2117 }
2118 } else if (resp_len != 0) {
2119 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP);
2120 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len;
2121 }
2122
2123 pkt->pkt_cmdlen = cmd_len;
2124 pkt->pkt_rsplen = resp_len;
2125 pkt->pkt_ulp_private = cmd;
2126
2127 return (cmd);
2128
2129 alloc_pkt_failed:
2130
2131 fp_free_dma(cmd);
2132
2133 if (pkt->pkt_cmd_cookie != NULL) {
2134 kmem_free(pkt->pkt_cmd_cookie,
2135 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2136 pkt->pkt_cmd_cookie = NULL;
2137 }
2138
2139 if (pkt->pkt_resp_cookie != NULL) {
2140 kmem_free(pkt->pkt_resp_cookie,
2141 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2142 pkt->pkt_resp_cookie = NULL;
2143 }
2144
2145 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2146 if (pkt->pkt_cmd) {
2147 kmem_free(pkt->pkt_cmd, cmd_len);
2148 }
2149
2150 if (pkt->pkt_resp) {
2151 kmem_free(pkt->pkt_resp, resp_len);
2152 }
2153 }
2154
2155 kmem_cache_free(port->fp_pkt_cache, cmd);
2156
2157 return (NULL);
2158 }
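/*
* Illustrative usage (hypothetical caller): allocate a packet sized for a
* PLOGI exchange against remote port pd, then release it:
*
*	fp_cmd_t *cmd;
*
*	cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
*	    sizeof (la_els_logi_t), KM_SLEEP, pd);
*	if (cmd != NULL) {
*		...
*		fp_free_pkt(cmd);
*	}
*/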
2159
2160
2161 /*
2162 * Free FC packet
2163 */
2164 static void
2165 fp_free_pkt(fp_cmd_t *cmd)
2166 {
2167 fc_local_port_t *port;
2168 fc_packet_t *pkt;
2169
2170 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2171
2172 cmd->cmd_next = NULL;
2173 cmd->cmd_job = NULL;
2174 pkt = &cmd->cmd_pkt;
2175 pkt->pkt_ulp_private = 0;
2176 pkt->pkt_tran_flags = 0;
2177 pkt->pkt_tran_type = 0;
2178 port = cmd->cmd_port;
2179
2180 if (pkt->pkt_cmd_cookie != NULL) {
2181 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2182 sizeof (ddi_dma_cookie_t));
2183 pkt->pkt_cmd_cookie = NULL;
2184 }
2185
2186 if (pkt->pkt_resp_cookie != NULL) {
2187 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2188 sizeof (ddi_dma_cookie_t));
2189 pkt->pkt_resp_cookie = NULL;
2190 }
2191
2192 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2193 if (pkt->pkt_cmd) {
2194 kmem_free(pkt->pkt_cmd,
2195 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1);
2196 }
2197
2198 if (pkt->pkt_resp) {
2199 kmem_free(pkt->pkt_resp,
2200 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2);
2201 }
2202 }
2203
2204 fp_free_dma(cmd);
2205 (void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2206 kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2207 }
2208
2209
2210 /*
2211 * Release DVMA resources
2212 */
2213 static void
2214 fp_free_dma(fp_cmd_t *cmd)
2215 {
2216 fc_packet_t *pkt = &cmd->cmd_pkt;
2217
2218 pkt->pkt_cmdlen = 0;
2219 pkt->pkt_rsplen = 0;
2220 pkt->pkt_tran_type = 0;
2221 pkt->pkt_tran_flags = 0;
2222
2223 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2224 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2225 }
2226
2227 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2228 if (pkt->pkt_cmd_acc) {
2229 ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2230 }
2231 }
2232
2233 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2234 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2235 }
2236
2237 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2238 if (pkt->pkt_resp_acc) {
2239 ddi_dma_mem_free(&pkt->pkt_resp_acc);
2240 }
2241 }
2242 cmd->cmd_dflags = 0;
2243 }
2244
2245
2246 /*
2247 * Dedicated thread to perform various activities, one for each
2248 * fc_local_port_t (driver soft state) instance.
2249 * Note that while this works out to one job thread per local
2250 * port, there are also some Solaris taskq threads in use on a per-local
2251 * port basis; these also need to be taken into consideration.
2252 */
2253 static void
2254 fp_job_handler(fc_local_port_t *port)
2255 {
2256 int rval;
2257 uint32_t *d_id;
2258 fc_remote_port_t *pd;
2259 job_request_t *job;
2260
2261 #ifndef __lock_lint
2262 /*
2263 * Solaris-internal stuff for proper operation of kernel threads
2264 * with Solaris CPR.
2265 */
2266 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2267 callb_generic_cpr, "fp_job_handler");
2268 #endif
2269
2270
2271 /* Loop forever waiting for work to do */
2272 for (;;) {
2273
2274 mutex_enter(&port->fp_mutex);
2275
2276 /*
2277 * Sleep if no work to do right now, or if we want
2278 * to suspend or power-down.
2279 */
2280 while (port->fp_job_head == NULL ||
2281 (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2282 FP_SOFT_SUSPEND))) {
2283 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2284 cv_wait(&port->fp_cv, &port->fp_mutex);
2285 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2286 }
2287
2288 /*
2289 * OK, we've just been woken up, so retrieve the next entry
2290 * from the head of the job queue for this local port.
2291 */
2292 job = fctl_deque_job(port);
2293
2294 /*
2295 * Handle all the fp driver's supported job codes here
2296 * in this big honkin' switch.
2297 */
2298 switch (job->job_code) {
2299 case JOB_PORT_SHUTDOWN:
2300 /*
2301 * fp_port_shutdown() is only called from here. This
2302 * will prepare the local port instance (softstate)
2303 * for detaching. This cancels timeout callbacks,
2304 * executes LOGOs with remote ports, cleans up tables,
2305 * and deallocates data structs.
2306 */
2307 fp_port_shutdown(port, job);
2308
2309 /*
2310 * This will exit the job thread.
2311 */
2312 #ifndef __lock_lint
2313 CALLB_CPR_EXIT(&(port->fp_cpr_info));
2314 #else
2315 mutex_exit(&port->fp_mutex);
2316 #endif
2317 fctl_jobdone(job);
2318 thread_exit();
2319
2320 /* NOTREACHED */
2321
2322 case JOB_ATTACH_ULP: {
2323 /*
2324 * This job is spawned in response to a ULP calling
2325 * fc_ulp_add().
2326 */
2327
2328 boolean_t do_attach_ulps = B_TRUE;
2329
2330 /*
2331 * If fp is detaching, we don't want to call
2332 * fp_startup_done as this asynchronous
2333 * notification may interfere with the re-attach.
2334 */
2335
2336 if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2337 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2338 do_attach_ulps = B_FALSE;
2339 } else {
2340 /*
2341 * We are going to force the transport
2342 * to attach to the ULPs, so set
2343 * fp_ulp_attach. This will keep any
2344 * potential detach from occurring until
2345 * we are done.
2346 */
2347 port->fp_ulp_attach = 1;
2348 }
2349
2350 mutex_exit(&port->fp_mutex);
2351
2352 /*
2353 * NOTE: Since we just dropped the mutex, there is now
2354 * a race window where the fp_soft_state check above
2355 * could change here. This race is covered because an
2356 * additional check was added in the functions hidden
2357 * under fp_startup_done().
2358 */
2359 if (do_attach_ulps == B_TRUE) {
2360 /*
2361 * This goes thru a bit of a convoluted call
2362 * chain before spawning off a DDI taskq
2363 * request to perform the actual attach
2364 * operations. Blocking can occur at a number
2365 * of points.
2366 */
2367 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2368 }
2369 job->job_result = FC_SUCCESS;
2370 fctl_jobdone(job);
2371 break;
2372 }
2373
2374 case JOB_ULP_NOTIFY: {
2375 /*
2376 * Pass state change notifications up to any/all
2377 * registered ULPs.
2378 */
2379 uint32_t statec;
2380
2381 statec = job->job_ulp_listlen;
2382 if (statec == FC_STATE_RESET_REQUESTED) {
2383 port->fp_last_task = port->fp_task;
2384 port->fp_task = FP_TASK_OFFLINE;
2385 fp_port_offline(port, 0);
2386 port->fp_task = port->fp_last_task;
2387 port->fp_last_task = FP_TASK_IDLE;
2388 }
2389
2390 if (--port->fp_statec_busy == 0) {
2391 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2392 }
2393
2394 mutex_exit(&port->fp_mutex);
2395
2396 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2397 fctl_jobdone(job);
2398 break;
2399 }
2400
2401 case JOB_PLOGI_ONE:
2402 /*
2403 * Issue a PLOGI to a single remote port. Multiple
2404 * PLOGIs to different remote ports may occur in
2405 * parallel.
2406 * This can create the fc_remote_port_t if it does not
2407 * already exist.
2408 */
2409
2410 mutex_exit(&port->fp_mutex);
2411 d_id = (uint32_t *)job->job_private;
2412 pd = fctl_get_remote_port_by_did(port, *d_id);
2413
2414 if (pd) {
2415 mutex_enter(&pd->pd_mutex);
2416 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2417 pd->pd_login_count++;
2418 mutex_exit(&pd->pd_mutex);
2419 job->job_result = FC_SUCCESS;
2420 fctl_jobdone(job);
2421 break;
2422 }
2423 mutex_exit(&pd->pd_mutex);
2424 } else {
2425 mutex_enter(&port->fp_mutex);
2426 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2427 mutex_exit(&port->fp_mutex);
2428 pd = fp_create_remote_port_by_ns(port,
2429 *d_id, KM_SLEEP);
2430 if (pd == NULL) {
2431 job->job_result = FC_FAILURE;
2432 fctl_jobdone(job);
2433 break;
2434 }
2435 } else {
2436 mutex_exit(&port->fp_mutex);
2437 }
2438 }
2439
2440 job->job_flags |= JOB_TYPE_FP_ASYNC;
2441 job->job_counter = 1;
2442
2443 rval = fp_port_login(port, *d_id, job,
2444 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2445
2446 if (rval != FC_SUCCESS) {
2447 job->job_result = rval;
2448 fctl_jobdone(job);
2449 }
2450 break;
2451
2452 case JOB_LOGO_ONE: {
2453 /*
2454 * Issue a PLOGO to a single remote port. Multiple
2455 * PLOGOs to different remote ports may occur in
2456 * parallel.
2457 */
2458 fc_remote_port_t *pd;
2459
2460 #ifndef __lock_lint
2461 ASSERT(job->job_counter > 0);
2462 #endif
2463
2464 pd = (fc_remote_port_t *)job->job_ulp_pkts;
2465
2466 mutex_enter(&pd->pd_mutex);
2467 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2468 mutex_exit(&pd->pd_mutex);
2469 job->job_result = FC_LOGINREQ;
2470 mutex_exit(&port->fp_mutex);
2471 fctl_jobdone(job);
2472 break;
2473 }
2474 if (pd->pd_login_count > 1) {
2475 pd->pd_login_count--;
2476 mutex_exit(&pd->pd_mutex);
2477 job->job_result = FC_SUCCESS;
2478 mutex_exit(&port->fp_mutex);
2479 fctl_jobdone(job);
2480 break;
2481 }
2482 mutex_exit(&pd->pd_mutex);
2483 mutex_exit(&port->fp_mutex);
2484 job->job_flags |= JOB_TYPE_FP_ASYNC;
2485 (void) fp_logout(port, pd, job);
2486 break;
2487 }
2488
2489 case JOB_FCIO_LOGIN:
2490 /*
2491 * PLOGI initiated at ioctl request.
2492 */
2493 mutex_exit(&port->fp_mutex);
2494 job->job_result =
2495 fp_fcio_login(port, job->job_private, job);
2496 fctl_jobdone(job);
2497 break;
2498
2499 case JOB_FCIO_LOGOUT:
2500 /*
2501 * PLOGO initiated at ioctl request.
2502 */
2503 mutex_exit(&port->fp_mutex);
2504 job->job_result =
2505 fp_fcio_logout(port, job->job_private, job);
2506 fctl_jobdone(job);
2507 break;
2508
2509 case JOB_PORT_GETMAP:
2510 case JOB_PORT_GETMAP_PLOGI_ALL: {
2511 port->fp_last_task = port->fp_task;
2512 port->fp_task = FP_TASK_GETMAP;
2513
2514 switch (port->fp_topology) {
2515 case FC_TOP_PRIVATE_LOOP:
2516 job->job_counter = 1;
2517
2518 fp_get_loopmap(port, job);
2519 mutex_exit(&port->fp_mutex);
2520 fp_jobwait(job);
2521 fctl_fillout_map(port,
2522 (fc_portmap_t **)job->job_private,
2523 (uint32_t *)job->job_arg, 1, 0, 0);
2524 fctl_jobdone(job);
2525 mutex_enter(&port->fp_mutex);
2526 break;
2527
2528 case FC_TOP_PUBLIC_LOOP:
2529 case FC_TOP_FABRIC:
2530 mutex_exit(&port->fp_mutex);
2531 job->job_counter = 1;
2532
2533 job->job_result = fp_ns_getmap(port,
2534 job, (fc_portmap_t **)job->job_private,
2535 (uint32_t *)job->job_arg,
2536 FCTL_GAN_START_ID);
2537 fctl_jobdone(job);
2538 mutex_enter(&port->fp_mutex);
2539 break;
2540
2541 case FC_TOP_PT_PT:
2542 mutex_exit(&port->fp_mutex);
2543 fctl_fillout_map(port,
2544 (fc_portmap_t **)job->job_private,
2545 (uint32_t *)job->job_arg, 1, 0, 0);
2546 fctl_jobdone(job);
2547 mutex_enter(&port->fp_mutex);
2548 break;
2549
2550 default:
2551 mutex_exit(&port->fp_mutex);
2552 fctl_jobdone(job);
2553 mutex_enter(&port->fp_mutex);
2554 break;
2555 }
2556 port->fp_task = port->fp_last_task;
2557 port->fp_last_task = FP_TASK_IDLE;
2558 mutex_exit(&port->fp_mutex);
2559 break;
2560 }
2561
2562 case JOB_PORT_OFFLINE: {
2563 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2564
2565 port->fp_last_task = port->fp_task;
2566 port->fp_task = FP_TASK_OFFLINE;
2567
2568 if (port->fp_statec_busy > 2) {
2569 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2570 fp_port_offline(port, 0);
2571 if (--port->fp_statec_busy == 0) {
2572 port->fp_soft_state &=
2573 ~FP_SOFT_IN_STATEC_CB;
2574 }
2575 } else {
2576 fp_port_offline(port, 1);
2577 }
2578
2579 port->fp_task = port->fp_last_task;
2580 port->fp_last_task = FP_TASK_IDLE;
2581
2582 mutex_exit(&port->fp_mutex);
2583
2584 fctl_jobdone(job);
2585 break;
2586 }
2587
2588 case JOB_PORT_STARTUP: {
2589 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2590 if (port->fp_statec_busy > 1) {
2591 mutex_exit(&port->fp_mutex);
2592 break;
2593 }
2594 mutex_exit(&port->fp_mutex);
2595
2596 FP_TRACE(FP_NHEAD2(9, rval),
2597 "Topology discovery failed");
2598 break;
2599 }
2600
2601 /*
2602 * Attempt building device handles in case
2603 * of private Loop.
2604 */
2605 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2606 job->job_counter = 1;
2607
2608 fp_get_loopmap(port, job);
2609 mutex_exit(&port->fp_mutex);
2610 fp_jobwait(job);
2611 mutex_enter(&port->fp_mutex);
2612 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2613 ASSERT(port->fp_total_devices == 0);
2614 port->fp_total_devices =
2615 port->fp_dev_count;
2616 }
2617 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2618 /*
2619 * Hack to avoid state changes going up early
2620 */
2621 port->fp_statec_busy++;
2622 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2623
2624 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2625 fp_fabric_online(port, job);
2626 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2627 }
2628 mutex_exit(&port->fp_mutex);
2629 fctl_jobdone(job);
2630 break;
2631 }
2632
2633 case JOB_PORT_ONLINE: {
2634 char *newtop;
2635 char *oldtop;
2636 uint32_t old_top;
2637
2638 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2639
2640 /*
2641 * Bail out early if there are a lot of
2642 * state changes in the pipeline
2643 */
2644 if (port->fp_statec_busy > 1) {
2645 --port->fp_statec_busy;
2646 mutex_exit(&port->fp_mutex);
2647 fctl_jobdone(job);
2648 break;
2649 }
2650
2651 switch (old_top = port->fp_topology) {
2652 case FC_TOP_PRIVATE_LOOP:
2653 oldtop = "Private Loop";
2654 break;
2655
2656 case FC_TOP_PUBLIC_LOOP:
2657 oldtop = "Public Loop";
2658 break;
2659
2660 case FC_TOP_PT_PT:
2661 oldtop = "Point to Point";
2662 break;
2663
2664 case FC_TOP_FABRIC:
2665 oldtop = "Fabric";
2666 break;
2667
2668 default:
2669 oldtop = NULL;
2670 break;
2671 }
2672
2673 port->fp_last_task = port->fp_task;
2674 port->fp_task = FP_TASK_ONLINE;
2675
2676 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2677
2678 port->fp_task = port->fp_last_task;
2679 port->fp_last_task = FP_TASK_IDLE;
2680
2681 if (port->fp_statec_busy > 1) {
2682 --port->fp_statec_busy;
2683 mutex_exit(&port->fp_mutex);
2684 break;
2685 }
2686
2687 port->fp_state = FC_STATE_OFFLINE;
2688
2689 FP_TRACE(FP_NHEAD2(9, rval),
2690 "Topology discovery failed");
2691
2692 if (--port->fp_statec_busy == 0) {
2693 port->fp_soft_state &=
2694 ~FP_SOFT_IN_STATEC_CB;
2695 }
2696
2697 if (port->fp_offline_tid == NULL) {
2698 port->fp_offline_tid =
2699 timeout(fp_offline_timeout,
2700 (caddr_t)port, fp_offline_ticks);
2701 }
2702
2703 mutex_exit(&port->fp_mutex);
2704 break;
2705 }
2706
2707 switch (port->fp_topology) {
2708 case FC_TOP_PRIVATE_LOOP:
2709 newtop = "Private Loop";
2710 break;
2711
2712 case FC_TOP_PUBLIC_LOOP:
2713 newtop = "Public Loop";
2714 break;
2715
2716 case FC_TOP_PT_PT:
2717 newtop = "Point to Point";
2718 break;
2719
2720 case FC_TOP_FABRIC:
2721 newtop = "Fabric";
2722 break;
2723
2724 default:
2725 newtop = NULL;
2726 break;
2727 }
2728
2729 if (oldtop && newtop && strcmp(oldtop, newtop)) {
2730 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2731 "Change in FC Topology old = %s new = %s",
2732 oldtop, newtop);
2733 }
2734
2735 switch (port->fp_topology) {
2736 case FC_TOP_PRIVATE_LOOP: {
2737 int orphan = (old_top == FC_TOP_FABRIC ||
2738 old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2739
2740 mutex_exit(&port->fp_mutex);
2741 fp_loop_online(port, job, orphan);
2742 break;
2743 }
2744
2745 case FC_TOP_PUBLIC_LOOP:
2746 /* FALLTHROUGH */
2747 case FC_TOP_FABRIC:
2748 fp_fabric_online(port, job);
2749 mutex_exit(&port->fp_mutex);
2750 break;
2751
2752 case FC_TOP_PT_PT:
2753 fp_p2p_online(port, job);
2754 mutex_exit(&port->fp_mutex);
2755 break;
2756
2757 default:
2758 if (--port->fp_statec_busy != 0) {
2759 /*
2760 * Wait and see what the next
2761 * state transition brings.
2762 */
2763 mutex_exit(&port->fp_mutex);
2764 break;
2765 }
2766
2767 FP_TRACE(FP_NHEAD2(9, 0),
2768 "Topology Unknown, Offlining the port..");
2769
2770 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2771 port->fp_state = FC_STATE_OFFLINE;
2772
2773 if (port->fp_offline_tid == NULL) {
2774 port->fp_offline_tid =
2775 timeout(fp_offline_timeout,
2776 (caddr_t)port, fp_offline_ticks);
2777 }
2778 mutex_exit(&port->fp_mutex);
2779 break;
2780 }
2781
2782 mutex_enter(&port->fp_mutex);
2783
2784 port->fp_task = port->fp_last_task;
2785 port->fp_last_task = FP_TASK_IDLE;
2786
2787 mutex_exit(&port->fp_mutex);
2788
2789 fctl_jobdone(job);
2790 break;
2791 }
2792
2793 case JOB_PLOGI_GROUP: {
2794 mutex_exit(&port->fp_mutex);
2795 fp_plogi_group(port, job);
2796 break;
2797 }
2798
2799 case JOB_UNSOL_REQUEST: {
2800 mutex_exit(&port->fp_mutex);
2801 fp_handle_unsol_buf(port,
2802 (fc_unsol_buf_t *)job->job_private, job);
2803 fctl_dealloc_job(job);
2804 break;
2805 }
2806
2807 case JOB_NS_CMD: {
2808 fctl_ns_req_t *ns_cmd;
2809
2810 mutex_exit(&port->fp_mutex);
2811
2812 job->job_flags |= JOB_TYPE_FP_ASYNC;
2813 ns_cmd = (fctl_ns_req_t *)job->job_private;
2814 if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2815 ns_cmd->ns_cmd_code > NS_DA_ID) {
2816 job->job_result = FC_BADCMD;
2817 fctl_jobdone(job);
2818 break;
2819 }
2820
2821 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2822 if (ns_cmd->ns_pd != NULL) {
2823 job->job_result = FC_BADOBJECT;
2824 fctl_jobdone(job);
2825 break;
2826 }
2827
2828 job->job_counter = 1;
2829
2830 rval = fp_ns_reg(port, ns_cmd->ns_pd,
2831 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2832
2833 if (rval != FC_SUCCESS) {
2834 job->job_result = rval;
2835 fctl_jobdone(job);
2836 }
2837 break;
2838 }
2839 job->job_result = FC_SUCCESS;
2840 job->job_counter = 1;
2841
2842 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2843 if (rval != FC_SUCCESS) {
2844 fctl_jobdone(job);
2845 }
2846 break;
2847 }
2848
2849 case JOB_LINK_RESET: {
2850 la_wwn_t *pwwn;
2851 uint32_t topology;
2852
2853 pwwn = (la_wwn_t *)job->job_private;
2854 ASSERT(pwwn != NULL);
2855
2856 topology = port->fp_topology;
2857 mutex_exit(&port->fp_mutex);
2858
2859 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2860 topology == FC_TOP_PRIVATE_LOOP) {
2861 job->job_flags |= JOB_TYPE_FP_ASYNC;
2862 rval = port->fp_fca_tran->fca_reset(
2863 port->fp_fca_handle, FC_FCA_LINK_RESET);
2864 job->job_result = rval;
2865 fp_jobdone(job);
2866 } else {
2867 ASSERT((job->job_flags &
2868 JOB_TYPE_FP_ASYNC) == 0);
2869
2870 if (FC_IS_TOP_SWITCH(topology)) {
2871 rval = fp_remote_lip(port, pwwn,
2872 KM_SLEEP, job);
2873 } else {
2874 rval = FC_FAILURE;
2875 }
2876 if (rval != FC_SUCCESS) {
2877 job->job_result = rval;
2878 }
2879 fctl_jobdone(job);
2880 }
2881 break;
2882 }
2883
2884 default:
2885 mutex_exit(&port->fp_mutex);
2886 job->job_result = FC_BADCMD;
2887 fctl_jobdone(job);
2888 break;
2889 }
2890 }
2891 /* NOTREACHED */
2892 }
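/*
* Illustrative sketch (assumed producer side): a synchronous caller queues
* work to this thread roughly as follows; fctl_enque_job() appends to
* fp_job_head and signals fp_cv to wake the handler:
*
*	job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP);
*	job->job_private = (void *)&map;
*	job->job_arg = (void *)&count;
*	fctl_enque_job(port, job);
*	fp_jobwait(job);
*/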
2893
2894
2895 /*
2896 * Perform FC port bring up initialization
2897 */
2898 static int
2899 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2900 {
2901 int rval;
2902 uint32_t state;
2903 uint32_t src_id;
2904 fc_lilpmap_t *lilp_map;
2905
2906 ASSERT(MUTEX_HELD(&port->fp_mutex));
2907 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2908
2909 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2910 " port=%p, job=%p", port, job);
2911
2912 port->fp_topology = FC_TOP_UNKNOWN;
2913 port->fp_port_id.port_id = 0;
2914 state = FC_PORT_STATE_MASK(port->fp_state);
2915
2916 if (state == FC_STATE_OFFLINE) {
2917 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2918 job->job_result = FC_OFFLINE;
2919 mutex_exit(&port->fp_mutex);
2920 fctl_jobdone(job);
2921 mutex_enter(&port->fp_mutex);
2922 return (FC_OFFLINE);
2923 }
2924
2925 if (state == FC_STATE_LOOP) {
2926 port->fp_port_type.port_type = FC_NS_PORT_NL;
2927 mutex_exit(&port->fp_mutex);
2928
2929 lilp_map = &port->fp_lilp_map;
2930 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2931 job->job_result = FC_FAILURE;
2932 fctl_jobdone(job);
2933
2934 FP_TRACE(FP_NHEAD1(9, rval),
2935 "LILP map Invalid or not present");
2936 mutex_enter(&port->fp_mutex);
2937 return (FC_FAILURE);
2938 }
2939
2940 if (lilp_map->lilp_length == 0) {
2941 job->job_result = FC_NO_MAP;
2942 fctl_jobdone(job);
2943 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2944 "LILP map length zero");
2945 mutex_enter(&port->fp_mutex);
2946 return (FC_NO_MAP);
2947 }
2948 src_id = lilp_map->lilp_myalpa & 0xFF;
2949 } else {
2950 fc_remote_port_t *pd;
2951 fc_fca_pm_t pm;
2952 fc_fca_p2p_info_t p2p_info;
2953 int pd_recepient;
2954
2955 /*
2956 * Get P2P remote port info if possible
2957 */
2958 bzero((caddr_t)&pm, sizeof (pm));
2959
2960 pm.pm_cmd_flags = FC_FCA_PM_READ;
2961 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2962 pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2963 pm.pm_data_buf = (caddr_t)&p2p_info;
2964
2965 rval = port->fp_fca_tran->fca_port_manage(
2966 port->fp_fca_handle, &pm);
2967
2968 if (rval == FC_SUCCESS) {
2969 port->fp_port_id.port_id = p2p_info.fca_d_id;
2970 port->fp_port_type.port_type = FC_NS_PORT_N;
2971 port->fp_topology = FC_TOP_PT_PT;
2972 port->fp_total_devices = 1;
2973 pd_recepient = fctl_wwn_cmp(
2974 &port->fp_service_params.nport_ww_name,
2975 &p2p_info.pwwn) < 0 ?
2976 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2977 mutex_exit(&port->fp_mutex);
2978 pd = fctl_create_remote_port(port,
2979 &p2p_info.nwwn,
2980 &p2p_info.pwwn,
2981 p2p_info.d_id,
2982 pd_recepient, KM_NOSLEEP);
2983 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2984 " P2P port=%p pd=%p fp %x pd %x", port, pd,
2985 port->fp_port_id.port_id, p2p_info.d_id);
2986 mutex_enter(&port->fp_mutex);
2987 return (FC_SUCCESS);
2988 }
2989 port->fp_port_type.port_type = FC_NS_PORT_N;
2990 mutex_exit(&port->fp_mutex);
2991 src_id = 0;
2992 }
2993
2994 job->job_counter = 1;
2995 job->job_result = FC_SUCCESS;
2996
2997 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2998 KM_SLEEP)) != FC_SUCCESS) {
2999 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
3000 job->job_result = FC_FAILURE;
3001 fctl_jobdone(job);
3002
3003 mutex_enter(&port->fp_mutex);
3004 if (port->fp_statec_busy <= 1) {
3005 mutex_exit(&port->fp_mutex);
3006 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
3007 "Couldn't transport FLOGI");
3008 mutex_enter(&port->fp_mutex);
3009 }
3010 return (FC_FAILURE);
3011 }
3012
3013 fp_jobwait(job);
3014
3015 mutex_enter(&port->fp_mutex);
3016 if (job->job_result == FC_SUCCESS) {
3017 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3018 mutex_exit(&port->fp_mutex);
3019 fp_ns_init(port, job, KM_SLEEP);
3020 mutex_enter(&port->fp_mutex);
3021 }
3022 } else {
3023 if (state == FC_STATE_LOOP) {
3024 port->fp_topology = FC_TOP_PRIVATE_LOOP;
3025 port->fp_port_id.port_id =
3026 port->fp_lilp_map.lilp_myalpa & 0xFF;
3027 }
3028 }
3029
3030 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
3031 port, job);
3032
3033 return (FC_SUCCESS);
3034 }
3035
3036
3037 /*
3038 * Perform ULP invocations following FC port startup
3039 */
3040 /* ARGSUSED */
3041 static void
3042 fp_startup_done(opaque_t arg, uchar_t result)
3043 {
3044 fc_local_port_t *port = arg;
3045
3046 fp_attach_ulps(port, FC_CMD_ATTACH);
3047
3048 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3049 }
3050
3051
3052 /*
3053 * Perform ULP port attach
3054 */
3055 static void
3056 fp_ulp_port_attach(void *arg)
3057 {
3058 fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3059 fc_local_port_t *port = att->att_port;
3060
3061 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3062 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3063
3064 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3065
3066 if (att->att_need_pm_idle == B_TRUE) {
3067 fctl_idle_port(port);
3068 }
3069
3070 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3071 " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3072
3073 mutex_enter(&att->att_port->fp_mutex);
3074 att->att_port->fp_ulp_attach = 0;
3075
3076 port->fp_task = port->fp_last_task;
3077 port->fp_last_task = FP_TASK_IDLE;
3078
3079 cv_signal(&att->att_port->fp_attach_cv);
3080
3081 mutex_exit(&att->att_port->fp_mutex);
3082
3083 kmem_free(att, sizeof (fp_soft_attach_t));
3084 }
3085
3086 /*
3087 * Entry point to funnel all requests down to FCAs
3088 */
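/*
* Note (assumed caller contract): cmd_transport is expected to have been
* filled in by the caller - for ELS traffic this is typically the FCA's
* fca_els_send entry point - before the command reaches fp_sendcmd().
*/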
3089 static int
3090 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3091 {
3092 int rval;
3093
3094 mutex_enter(&port->fp_mutex);
3095 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3096 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3097 FC_STATE_OFFLINE))) {
3098 /*
3099 * This means there is more than one state change
3100 * pending at this point in time - since they are
3101 * processed serially, processing of the current one
3102 * should be failed so we can move on to the next.
3103 */
3104 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3105 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3106 if (cmd->cmd_job) {
3107 /*
3108 * A state change that is going to be invalidated
3109 * by another one already in the port driver's queue
3110 * need not go up to all ULPs. This will minimize
3111 * needless processing and ripples in ULP modules
3112 */
3113 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3114 }
3115 mutex_exit(&port->fp_mutex);
3116 return (FC_STATEC_BUSY);
3117 }
3118
3119 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3120 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3121 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3122 mutex_exit(&port->fp_mutex);
3123
3124 return (FC_OFFLINE);
3125 }
3126 mutex_exit(&port->fp_mutex);
3127
3128 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3129 if (rval != FC_SUCCESS) {
3130 if (rval == FC_TRAN_BUSY) {
3131 cmd->cmd_retry_interval = fp_retry_delay;
3132 rval = fp_retry_cmd(&cmd->cmd_pkt);
3133 if (rval == FC_FAILURE) {
3134 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3135 }
3136 }
3137 } else {
3138 mutex_enter(&port->fp_mutex);
3139 port->fp_out_fpcmds++;
3140 mutex_exit(&port->fp_mutex);
3141 }
3142
3143 return (rval);
3144 }
3145
3146
3147 /*
3148 * Each time the timeout kicks in, walk the wait queue and decrement
3149 * the retry_interval; when the retry_interval becomes less than
3150 * or equal to zero, re-transport the command. If the re-transport
3151 * fails with BUSY, re-enqueue the command in the wait queue.
3152 *
3153 * In order to prevent looping forever because of commands enqueued
3154 * from within this function itself, save the current tail pointer
3155 * (in cur_tail) and exit the loop after serving this command.
3156 */
3157 static void
3158 fp_resendcmd(void *port_handle)
3159 {
3160 int rval;
3161 fc_local_port_t *port;
3162 fp_cmd_t *cmd;
3163 fp_cmd_t *cur_tail;
3164
3165 port = port_handle;
3166 mutex_enter(&port->fp_mutex);
3167 cur_tail = port->fp_wait_tail;
3168 mutex_exit(&port->fp_mutex);
3169
3170 while ((cmd = fp_deque_cmd(port)) != NULL) {
3171 cmd->cmd_retry_interval -= fp_retry_ticker;
3172 /* Check if we are detaching */
3173 if (port->fp_soft_state &
3174 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3175 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3176 cmd->cmd_pkt.pkt_reason = 0;
3177 fp_iodone(cmd);
3178 } else if (cmd->cmd_retry_interval <= 0) {
3179 rval = cmd->cmd_transport(port->fp_fca_handle,
3180 &cmd->cmd_pkt);
3181
3182 if (rval != FC_SUCCESS) {
3183 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3184 if (--cmd->cmd_retry_count) {
3185 fp_enque_cmd(port, cmd);
3186 if (cmd == cur_tail) {
3187 break;
3188 }
3189 continue;
3190 }
3191 cmd->cmd_pkt.pkt_state =
3192 FC_PKT_TRAN_BSY;
3193 } else {
3194 cmd->cmd_pkt.pkt_state =
3195 FC_PKT_TRAN_ERROR;
3196 }
3197 cmd->cmd_pkt.pkt_reason = 0;
3198 fp_iodone(cmd);
3199 } else {
3200 mutex_enter(&port->fp_mutex);
3201 port->fp_out_fpcmds++;
3202 mutex_exit(&port->fp_mutex);
3203 }
3204 } else {
3205 fp_enque_cmd(port, cmd);
3206 }
3207
3208 if (cmd == cur_tail) {
3209 break;
3210 }
3211 }
3212
3213 mutex_enter(&port->fp_mutex);
3214 if (port->fp_wait_head) {
3215 timeout_id_t tid;
3216
3217 mutex_exit(&port->fp_mutex);
3218 tid = timeout(fp_resendcmd, (caddr_t)port,
3219 fp_retry_ticks);
3220 mutex_enter(&port->fp_mutex);
3221 port->fp_wait_tid = tid;
3222 } else {
3223 port->fp_wait_tid = NULL;
3224 }
3225 mutex_exit(&port->fp_mutex);
3226 }
3227
3228
3229 /*
3230 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3231 *
3232 * Yes, as you can see below, cmd_retry_count is used here too. That means
3233 * the retries for BUSY are fewer if there were transport failures (transport
3234 * failure means fca_transport failure). The goal is not to exceed the overall
3235 * retries set in cmd_retry_count (whatever the reason for the retry may be).
3236 *
3237 * Return Values:
3238 * FC_SUCCESS
3239 * FC_FAILURE
3240 */
3241 static int
3242 fp_retry_cmd(fc_packet_t *pkt)
3243 {
3244 fp_cmd_t *cmd;
3245
3246 cmd = pkt->pkt_ulp_private;
3247
3248 if (--cmd->cmd_retry_count) {
3249 fp_enque_cmd(cmd->cmd_port, cmd);
3250 return (FC_SUCCESS);
3251 } else {
3252 return (FC_FAILURE);
3253 }
3254 }
3255
3256
3257 /*
3258 * Queue up FC packet for deferred retry
3259 */
3260 static void
3261 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
3262 {
3263 timeout_id_t tid;
3264
3265 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3266
3267 #ifdef DEBUG
3268 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
3269 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
3270 #endif
3271
3272 mutex_enter(&port->fp_mutex);
3273 if (port->fp_wait_tail) {
3274 port->fp_wait_tail->cmd_next = cmd;
3275 port->fp_wait_tail = cmd;
3276 } else {
3277 ASSERT(port->fp_wait_head == NULL);
3278 port->fp_wait_head = port->fp_wait_tail = cmd;
3279 if (port->fp_wait_tid == NULL) {
3280 mutex_exit(&port->fp_mutex);
3281 tid = timeout(fp_resendcmd, (caddr_t)port,
3282 fp_retry_ticks);
3283 mutex_enter(&port->fp_mutex);
3284 port->fp_wait_tid = tid;
3285 }
3286 }
3287 mutex_exit(&port->fp_mutex);
3288 }
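/*
* Retry timing sketch (assuming the default tunables): a command that fails
* with FC_TRAN_BUSY gets cmd_retry_interval set to fp_retry_delay and is
* queued here; every fp_retry_ticks the fp_resendcmd() timeout fires and
* subtracts fp_retry_ticker from the interval, so the command is actually
* re-transported only after roughly fp_retry_delay / fp_retry_ticker
* timeout cycles.
*/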
3289
3290
3291 /*
3292 * Handle all RJT codes
3293 */
3294 static int
3295 fp_handle_reject(fc_packet_t *pkt)
3296 {
3297 int rval = FC_FAILURE;
3298 uchar_t next_class;
3299 fp_cmd_t *cmd;
3300 fc_local_port_t *port;
3301
3302 cmd = pkt->pkt_ulp_private;
3303 port = cmd->cmd_port;
3304
3305 switch (pkt->pkt_state) {
3306 case FC_PKT_FABRIC_RJT:
3307 case FC_PKT_NPORT_RJT:
3308 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
3309 next_class = fp_get_nextclass(cmd->cmd_port,
3310 FC_TRAN_CLASS(pkt->pkt_tran_flags));
3311
3312 if (next_class == FC_TRAN_CLASS_INVALID) {
3313 return (rval);
3314 }
3315 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3316 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3317
3318 rval = fp_sendcmd(cmd->cmd_port, cmd,
3319 cmd->cmd_port->fp_fca_handle);
3320
3321 if (rval != FC_SUCCESS) {
3322 pkt->pkt_state = FC_PKT_TRAN_ERROR;
3323 }
3324 }
3325 break;
3326
3327 case FC_PKT_LS_RJT:
3328 case FC_PKT_BA_RJT:
3329 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
3330 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
3331 cmd->cmd_retry_interval = fp_retry_delay;
3332 rval = fp_retry_cmd(pkt);
3333 }
3334 break;
3335
3336 case FC_PKT_FS_RJT:
3337 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) ||
3338 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) &&
3339 (pkt->pkt_expln == 0x00))) {
3340 cmd->cmd_retry_interval = fp_retry_delay;
3341 rval = fp_retry_cmd(pkt);
3342 }
3343 break;
3344
3345 case FC_PKT_LOCAL_RJT:
3346 if (pkt->pkt_reason == FC_REASON_QFULL) {
3347 cmd->cmd_retry_interval = fp_retry_delay;
3348 rval = fp_retry_cmd(pkt);
3349 }
3350 break;
3351
3352 default:
3353 FP_TRACE(FP_NHEAD1(1, 0),
3354 "fp_handle_reject(): Invalid pkt_state");
3355 break;
3356 }
3357
3358 return (rval);
3359 }
3360
3361
3362 /*
3363 * Return the next class of service supported by the FCA
3364 */
3365 static uchar_t
3366 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3367 {
3368 uchar_t next_class;
3369
3370 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3371
3372 switch (cur_class) {
3373 case FC_TRAN_CLASS_INVALID:
3374 if (port->fp_cos & FC_NS_CLASS1) {
3375 next_class = FC_TRAN_CLASS1;
3376 break;
3377 }
3378 /* FALLTHROUGH */
3379
3380 case FC_TRAN_CLASS1:
3381 if (port->fp_cos & FC_NS_CLASS2) {
3382 next_class = FC_TRAN_CLASS2;
3383 break;
3384 }
3385 /* FALLTHROUGH */
3386
3387 case FC_TRAN_CLASS2:
3388 if (port->fp_cos & FC_NS_CLASS3) {
3389 next_class = FC_TRAN_CLASS3;
3390 break;
3391 }
3392 /* FALLTHROUGH */
3393
3394 case FC_TRAN_CLASS3:
3395 default:
3396 next_class = FC_TRAN_CLASS_INVALID;
3397 break;
3398 }
3399
3400 return (next_class);
3401 }
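/*
* Example: on a port whose fp_cos advertises only FC_NS_CLASS3, a call with
* cur_class == FC_TRAN_CLASS_INVALID falls through the Class 1 and Class 2
* checks and returns FC_TRAN_CLASS3; a subsequent call with FC_TRAN_CLASS3
* returns FC_TRAN_CLASS_INVALID, terminating the walk.
*/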
3402
3403
3404 /*
3405 * Determine if a class of service is supported by the FCA
3406 */
3407 static int
3408 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3409 {
3410 int rval;
3411
3412 switch (tran_class) {
3413 case FC_TRAN_CLASS1:
3414 if (cos & FC_NS_CLASS1) {
3415 rval = FC_SUCCESS;
3416 } else {
3417 rval = FC_FAILURE;
3418 }
3419 break;
3420
3421 case FC_TRAN_CLASS2:
3422 if (cos & FC_NS_CLASS2) {
3423 rval = FC_SUCCESS;
3424 } else {
3425 rval = FC_FAILURE;
3426 }
3427 break;
3428
3429 case FC_TRAN_CLASS3:
3430 if (cos & FC_NS_CLASS3) {
3431 rval = FC_SUCCESS;
3432 } else {
3433 rval = FC_FAILURE;
3434 }
3435 break;
3436
3437 default:
3438 rval = FC_FAILURE;
3439 break;
3440 }
3441
3442 return (rval);
3443 }
3444
3445
3446 /*
3447 * Dequeue FC packet for retry
3448 */
3449 static fp_cmd_t *
3450 fp_deque_cmd(fc_local_port_t *port)
3451 {
3452 fp_cmd_t *cmd;
3453
3454 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3455
3456 mutex_enter(&port->fp_mutex);
3457
3458 if (port->fp_wait_head == NULL) {
3459 /*
3460 * To avoid races, NULL the fp_wait_tid as
3461 * we are about to exit the timeout thread.
3462 */
3463 port->fp_wait_tid = NULL;
3464 mutex_exit(&port->fp_mutex);
3465 return (NULL);
3466 }
3467
3468 cmd = port->fp_wait_head;
3469 port->fp_wait_head = cmd->cmd_next;
3470 cmd->cmd_next = NULL;
3471
3472 if (port->fp_wait_head == NULL) {
3473 port->fp_wait_tail = NULL;
3474 }
3475 mutex_exit(&port->fp_mutex);
3476
3477 return (cmd);
3478 }
3479
3480
3481 /*
3482 * Wait for job completion
3483 */
3484 static void
3485 fp_jobwait(job_request_t *job)
3486 {
3487 sema_p(&job->job_port_sema);
3488 }
3489
3490
3491 /*
3492 * Convert FC packet state to FC errno
3493 */
3494 int
3495 fp_state_to_rval(uchar_t state)
3496 {
3497 int count;
3498
3499 for (count = 0; count < sizeof (fp_xlat) /
3500 sizeof (fp_xlat[0]); count++) {
3501 if (fp_xlat[count].xlat_state == state) {
3502 return (fp_xlat[count].xlat_rval);
3503 }
3504 }
3505
3506 return (FC_FAILURE);
3507 }
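/*
* fp_xlat (declared earlier in this file) pairs each FC_PKT_* packet state
* with its FC_* errno; the linear search above returns the matching entry's
* xlat_rval, defaulting to FC_FAILURE when no entry matches.
*/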
3508
3509
3510 /*
3511 * For synchronous I/O requests, the caller is
3512 * expected to call fctl_jobdone() (if necessary).
3513 *
3514 * We want to preserve at least one failure in
3515 * job_result, should one occur.
3516 *
3517 */
3518 static void
3519 fp_iodone(fp_cmd_t *cmd)
3520 {
3521 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt;
3522 job_request_t *job = cmd->cmd_job;
3523 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd;
3524
3525 ASSERT(job != NULL);
3526 ASSERT(cmd->cmd_port != NULL);
3528
3529 mutex_enter(&job->job_mutex);
3530 if (job->job_result == FC_SUCCESS) {
3531 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3532 }
3533 mutex_exit(&job->job_mutex);
3534
3535 if (pd) {
3536 mutex_enter(&pd->pd_mutex);
3537 pd->pd_flags = PD_IDLE;
3538 mutex_exit(&pd->pd_mutex);
3539 }
3540
3541 if (ulp_pkt) {
3542 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3543 FP_IS_PKT_ERROR(ulp_pkt)) {
3544 fc_local_port_t *port;
3545 fc_remote_node_t *node;
3546
3547 port = cmd->cmd_port;
3548
3549 mutex_enter(&pd->pd_mutex);
3550 pd->