1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.  To help explain
57  * that function, a flow diagram is given here.  This diagram doesn't
58  * claim to cover all the cases and events that can occur during the
59  * discovery process, nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to give the reader (and potential bug
61  * fixer) an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly, or of a new
63  * port attaching to FCP while the link is up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | is the code sequence handling       |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433     int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435     uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439     uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446     int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449     uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451     uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461     uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463     int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466     int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470     int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474     int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476     int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478     int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480     int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486     fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490     struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496     fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500     int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503     uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505     int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508     int);
509 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512     int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516     int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520     int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526     int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530     int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532     dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534     int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536     int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541     struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544     uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546     struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551     child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554     int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557     int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559     struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561     uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581     uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585     struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588     int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 
596 /*
597  * New functions added for mpxio support
598  */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602     int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604     dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610     int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612     fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 
615 /*
616  * New functions added for lun masking support
617  */
618 static void fcp_read_blacklist(dev_info_t *dip,
619     struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621     struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 
627 extern struct mod_ops	mod_driverops;
628 /*
629  * This variable is defined in modctl.c and set to '1' after the root driver
630  * and fs are loaded.  It serves as an indication that the root filesystem can
631  * be used.
632  */
633 extern int		modrootloaded;
634 /*
635  * This table contains strings associated with the SCSI sense key codes.  It
636  * is used by FCP to print a clear explanation of the code returned in the
637  * sense information by a device.
638  */
639 extern char		*sense_keys[];
640 /*
641  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
642  * under this device that the paths to a physical device are created when
643  * MPxIO is used.
644  */
645 extern dev_info_t	*scsi_vhci_dip;
646 
647 /*
648  * Report lun processing
649  */
650 #define	FCP_LUN_ADDRESSING		0x80
651 #define	FCP_PD_ADDRESSING		0x00
652 #define	FCP_VOLUME_ADDRESSING		0x40
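/*
 * The values above correspond to the SAM address-method field carried in
 * bits 7:6 of the first byte of each 8-byte LUN entry returned by
 * REPORT LUNS: 00b is peripheral device addressing, 01b is flat space
 * (volume) addressing and 10b is logical unit addressing.
 */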
653 
654 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
655 #define	MAX_INT_DMA			0x7fffffff
656 #define	FCP_MAX_SENSE_LEN		252
657 #define	FCP_MAX_RESPONSE_LEN		0xffffff
658 /*
659  * Property definitions
660  */
661 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
662 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
663 #define	TARGET_PROP	(char *)fcp_target_prop
664 #define	LUN_PROP	(char *)fcp_lun_prop
665 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
666 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
667 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
668 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
669 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
670 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
671 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
672 /*
673  * Short hand macros.
674  */
675 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
676 #define	LUN_TGT		(plun->lun_tgt)
677 
678 /*
679  * Driver private macros
680  */
681 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
682 			((x) >= 'a' && (x) <= 'f') ?			\
683 			((x) - 'a' + 10) : ((x) - 'A' + 10))
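/*
 * For example, FCP_ATOB('4') is 4 and FCP_ATOB('f') is 15, so an ASCII
 * hex pair such as "4f" converts to the byte value 0x4f with:
 *
 *	byte = (FCP_ATOB('4') << 4) | FCP_ATOB('f');
 *
 * This is the kind of per-character conversion fcp_ascii_to_wwn()
 * performs when turning a WWN string into its byte representation.
 */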
684 
685 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
686 
687 #define	FCP_N_NDI_EVENTS						\
688 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
689 
690 #define	FCP_LINK_STATE_CHANGED(p, c)			\
691 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
692 
693 #define	FCP_TGT_STATE_CHANGED(t, c)			\
694 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
695 
696 #define	FCP_STATE_CHANGED(p, t, c)		\
697 	(FCP_TGT_STATE_CHANGED(t, c))
698 
699 #define	FCP_MUST_RETRY(fpkt)				\
700 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
701 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
702 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
703 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
704 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
705 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
706 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
707 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
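/*
 * Hypothetical illustration (not lifted from the driver's actual code
 * paths): a completion routine that sees one of the transient states
 * above could requeue the internal packet rather than fail it, e.g.
 *
 *	if (FCP_MUST_RETRY(fpkt) && fcp_is_retryable(icmd)) {
 *		fcp_queue_ipkt(pptr, fpkt);
 *	}
 */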
708 
709 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
710 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
711 	(es)->es_add_code == 0x3f &&		\
712 	(es)->es_qual_code == 0x0e)
713 
714 #define	FCP_SENSE_NO_LUN(es)			\
715 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
716 	(es)->es_add_code == 0x25 &&		\
717 	(es)->es_qual_code == 0x0)
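/*
 * The additional sense code / qualifier pairs tested above are the
 * standard SCSI encodings: 3Fh/0Eh is "REPORTED LUNS DATA HAS CHANGED"
 * and 25h/00h is "LOGICAL UNIT NOT SUPPORTED".
 */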
718 
719 #define	FCP_VERSION		"1.188"
720 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
721 
722 #define	FCP_NUM_ELEMENTS(array)			\
723 	(sizeof (array) / sizeof ((array)[0]))
724 
725 /*
726  * Debugging, Error reporting, and tracing
727  */
728 #define	FCP_LOG_SIZE		1024 * 1024
729 
730 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
731 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
732 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
733 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
734 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
735 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
736 #define	FCP_LEVEL_7		0x00040
737 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
738 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
739 
740 
741 
742 /*
743  * Log contents to system messages file
744  */
745 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
746 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
747 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
748 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
749 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
750 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
751 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
752 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
753 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
754 
755 
756 /*
757  * Log contents to trace buffer
758  */
759 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
760 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
761 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
762 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
763 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
764 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
765 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
766 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
767 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
768 
769 
770 /*
771  * Log contents to both system messages file and trace buffer
772  */
773 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
774 				FC_TRACE_LOG_MSG)
775 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
776 				FC_TRACE_LOG_MSG)
777 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
778 				FC_TRACE_LOG_MSG)
779 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
780 				FC_TRACE_LOG_MSG)
781 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
782 				FC_TRACE_LOG_MSG)
783 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
784 				FC_TRACE_LOG_MSG)
785 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
786 				FC_TRACE_LOG_MSG)
787 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #ifdef DEBUG
792 #define	FCP_DTRACE	fc_trace_debug
793 #else
794 #define	FCP_DTRACE
795 #endif
796 
797 #define	FCP_TRACE	fc_trace_debug
798 
799 static struct cb_ops fcp_cb_ops = {
800 	fcp_open,			/* open */
801 	fcp_close,			/* close */
802 	nodev,				/* strategy */
803 	nodev,				/* print */
804 	nodev,				/* dump */
805 	nodev,				/* read */
806 	nodev,				/* write */
807 	fcp_ioctl,			/* ioctl */
808 	nodev,				/* devmap */
809 	nodev,				/* mmap */
810 	nodev,				/* segmap */
811 	nochpoll,			/* chpoll */
812 	ddi_prop_op,			/* cb_prop_op */
813 	0,				/* streamtab */
814 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
815 	CB_REV,				/* rev */
816 	nodev,				/* aread */
817 	nodev				/* awrite */
818 };
819 
820 
821 static struct dev_ops fcp_ops = {
822 	DEVO_REV,
823 	0,
824 	ddi_getinfo_1to1,
825 	nulldev,		/* identify */
826 	nulldev,		/* probe */
827 	fcp_attach,		/* attach and detach are mandatory */
828 	fcp_detach,
829 	nodev,			/* reset */
830 	&fcp_cb_ops,		/* cb_ops */
831 	NULL,			/* bus_ops */
832 	NULL,			/* power */
833 };
834 
835 
836 char *fcp_version = FCP_NAME_VERSION;
837 
838 static struct modldrv modldrv = {
839 	&mod_driverops,
840 	FCP_NAME_VERSION,
841 	&fcp_ops
842 };
843 
844 
845 static struct modlinkage modlinkage = {
846 	MODREV_1,
847 	&modldrv,
848 	NULL
849 };
850 
851 
852 static fc_ulp_modinfo_t fcp_modinfo = {
853 	&fcp_modinfo,			/* ulp_handle */
854 	FCTL_ULP_MODREV_4,		/* ulp_rev */
855 	FC4_SCSI_FCP,			/* ulp_type */
856 	"fcp",				/* ulp_name */
857 	FCP_STATEC_MASK,		/* ulp_statec_mask */
858 	fcp_port_attach,		/* ulp_port_attach */
859 	fcp_port_detach,		/* ulp_port_detach */
860 	fcp_port_ioctl,			/* ulp_port_ioctl */
861 	fcp_els_callback,		/* ulp_els_callback */
862 	fcp_data_callback,		/* ulp_data_callback */
863 	fcp_statec_callback		/* ulp_statec_callback */
864 };
865 
866 #ifdef	DEBUG
867 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
868 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
869 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
870 				FCP_LEVEL_6 | FCP_LEVEL_7)
871 #else
872 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
873 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
874 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
875 				FCP_LEVEL_6 | FCP_LEVEL_7)
876 #endif
877 
878 /* FCP global variables */
879 int			fcp_bus_config_debug = 0;
880 static int		fcp_log_size = FCP_LOG_SIZE;
881 static int		fcp_trace = FCP_TRACE_DEFAULT;
882 static fc_trace_logq_t	*fcp_logq = NULL;
883 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
884 /*
885  * Auto-configuration is enabled by default.  The only way to disable it
886  * is through the property MANUAL_CFG_ONLY in the fcp.conf file.
887  */
888 static int		fcp_enable_auto_configuration = 1;
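/*
 * For example, a (hypothetical) fcp.conf entry disabling
 * auto-configuration would look like:
 *
 *	manual_configuration_only=1;
 */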
889 static int		fcp_max_bus_config_retries	= 4;
890 static int		fcp_lun_ready_retry = 300;
891 /*
892  * The value assigned to the following variable has changed several times due
893  * to a problem with the data-underrun reporting of some firmware.  The
894  * current value of 50 gives a timeout of 25 seconds for a maximum of
895  * 256 LUNs.
896  */
897 static int		fcp_max_target_retries = 50;
898 /*
899  * Watchdog variables
900  * ------------------
901  *
902  * fcp_watchdog_init
903  *
904  *	Indicates if the watchdog timer is running or not.  This is
905  *	actually a counter of the number of Fibre Channel ports that have
906  *	attached.  When the first port attaches the watchdog is started.
907  *	When the last port detaches the watchdog timer is stopped.
908  *
909  * fcp_watchdog_time
910  *
911  *	This is the watchdog clock counter.  It is incremented by
912  *	fcp_watchdog_timeout each time the watchdog timer expires.
913  *
914  * fcp_watchdog_timeout
915  *
916  *	Increment value of the variable fcp_watchdog_time as well as the
917  *	timeout value of the watchdog timer.  Oddly, this is a variable
918  *	rather than a #define, since the code never changes its value.
919  *	The unit is 1 second, because the number of ticks for the watchdog
920  *	timer is determined like this:
921  *
922  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
923  *				  drv_usectohz(1000000);
924  *
925  *	The value 1000000 is hard coded in the code.
926  *	The value 1000000 is hard coded in the code.
927  *
928  * fcp_watchdog_tick
929  *
930  *	Watchdog timer value in ticks.
931  */
932 static int		fcp_watchdog_init = 0;
933 static int		fcp_watchdog_time = 0;
934 static int		fcp_watchdog_timeout = 1;
935 static int		fcp_watchdog_tick;
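/*
 * A worked example of the formula above: drv_usectohz(1000000) returns
 * the number of clock ticks in one second (i.e. hz), so on a system with,
 * say, a 100Hz clock and the default fcp_watchdog_timeout of 1, the
 * watchdog fires every fcp_watchdog_tick = 1 * 100 = 100 ticks, or once
 * per second.
 */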
936 
937 /*
938  * fcp_offline_delay is a global variable to enable customisation of
939  * the timeout on link offlines or RSCNs. The default value is set
940  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
941  * specified in FCP4 Chapter 11 (see www.t10.org).
942  *
943  * The variable fcp_offline_delay is specified in SECONDS.
944  *
945  * If we made this a static var then the user would not be able to
946  * change it. This variable is set in fcp_attach().
947  */
948 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
949 
950 static void		*fcp_softstate = NULL; /* for soft state */
951 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
952 static kmutex_t		fcp_global_mutex;
953 static kmutex_t		fcp_ioctl_mutex;
954 static dev_info_t	*fcp_global_dip = NULL;
955 static timeout_id_t	fcp_watchdog_id;
956 const char		*fcp_lun_prop = "lun";
957 const char		*fcp_sam_lun_prop = "sam-lun";
958 const char		*fcp_target_prop = "target";
959 /*
960  * NOTE: consumers of "node-wwn" property include stmsboot in ON
961  * consolidation.
962  */
963 const char		*fcp_node_wwn_prop = "node-wwn";
964 const char		*fcp_port_wwn_prop = "port-wwn";
965 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
966 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
967 const char		*fcp_manual_config_only = "manual_configuration_only";
968 const char		*fcp_init_port_prop = "initiator-port";
969 const char		*fcp_tgt_port_prop = "target-port";
970 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
971 
972 static struct fcp_port	*fcp_port_head = NULL;
973 static ddi_eventcookie_t	fcp_insert_eid;
974 static ddi_eventcookie_t	fcp_remove_eid;
975 
976 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
977 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
978 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
979 };
980 
981 /*
982  * List of valid commands for the scsi_ioctl call
983  */
984 static uint8_t scsi_ioctl_list[] = {
985 	SCMD_INQUIRY,
986 	SCMD_REPORT_LUN,
987 	SCMD_READ_CAPACITY
988 };
989 
990 /*
991  * this is used to dummy up a report lun response for cases
992  * where the target doesn't support it
993  */
994 static uchar_t fcp_dummy_lun[] = {
995 	0x00,		/* MSB length (length = no of luns * 8) */
996 	0x00,
997 	0x00,
998 	0x08,		/* LSB length */
999 	0x00,		/* MSB reserved */
1000 	0x00,
1001 	0x00,
1002 	0x00,		/* LSB reserved */
1003 	FCP_PD_ADDRESSING,
1004 	0x00,		/* LUN is ZERO at the first level */
1005 	0x00,
1006 	0x00,		/* second level is zero */
1007 	0x00,
1008 	0x00,		/* third level is zero */
1009 	0x00,
1010 	0x00		/* fourth level is zero */
1011 };
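/*
 * Note on the length field above: the REPORT LUNS header carries the
 * LUN list length in bytes (number of LUNs * 8), so this single LUN 0
 * entry yields 0x00000008 in the first four bytes.
 */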
1012 
1013 static uchar_t fcp_alpa_to_switch[] = {
1014 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1015 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1016 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1017 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1018 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1019 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1020 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1021 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1022 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1023 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1024 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1025 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1026 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1027 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1028 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1030 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1031 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1032 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1033 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1034 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1035 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1036 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1037 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038 };
1039 
1040 static caddr_t pid = "SESS01	      ";
1041 
1042 #if	!defined(lint)
1043 
1044 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1045     fcp_port::fcp_next fcp_watchdog_id))
1046 
1047 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1048 
1049 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1050     fcp_insert_eid
1051     fcp_remove_eid
1052     fcp_watchdog_time))
1053 
1054 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1055     fcp_cb_ops
1056     fcp_ops
1057     callb_cpr))
1058 
1059 #endif /* lint */
1060 
1061 /*
1062  * This table is used to determine whether or not it's safe to copy in
1063  * the target node name for a lun.  Since all luns behind the same target
1064  * have the same wwnn, only targets that do not support multiple luns are
1065  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1066  */
1067 
1068 char *fcp_symmetric_disk_table[] = {
1069 	"SEAGATE ST",
1070 	"IBM	 DDYFT",
1071 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1072 	"SUN	 SENA",		/* SES device */
1073 	"SUN	 SESS01"	/* VICOM SVE box */
1074 };
1075 
1076 int fcp_symmetric_disk_table_size =
1077 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
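/*
 * Each entry above is an INQUIRY vendor/product-id prefix.  The probe
 * logic (see fcp_symmetric_device_probe()) is presumably expected to
 * compare the start of a device's inquiry identification data against
 * these strings to decide whether a non-page83 device may still be
 * treated as a symmetric, single-LUN device for MPxIO purposes.
 */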
1078 
1079 /*
1080  * The _init(9e) return value should be that of mod_install(9f). Under
1081  * some circumstances, a failure may not be related to mod_install(9f) and
1082  * one would then require a return value to indicate the failure. Looking
1083  * at mod_install(9f), it is expected to return 0 for success and non-zero
1084  * for failure. For device drivers, mod_install(9f) goes further down the
1085  * calling chain and ends up in ddi_installdrv(), whose return values are
1086  * DDI_SUCCESS and DDI_FAILURE.  There are also other functions in the
1087  * calling chain of mod_install(9f) which return values like EINVAL and
1088  * in some cases even return -1.
1089  *
1090  * To work around the vagaries of the mod_install() calling chain, return
1091  * either 0 or ENODEV depending on the success or failure of mod_install().
1092  */
1093 int
1094 _init(void)
1095 {
1096 	int rval;
1097 
1098 	/*
1099 	 * Set up the soft state framework (so that ddi_soft_state_zalloc()
1100 	 * can be used later) before registering with the transport.
1101 	 */
1102 	if (ddi_soft_state_init(&fcp_softstate,
1103 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1104 		return (EINVAL);
1105 	}
1106 
1107 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1108 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1109 
1110 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1111 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1112 		mutex_destroy(&fcp_global_mutex);
1113 		mutex_destroy(&fcp_ioctl_mutex);
1114 		ddi_soft_state_fini(&fcp_softstate);
1115 		return (ENODEV);
1116 	}
1117 
1118 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1119 
1120 	if ((rval = mod_install(&modlinkage)) != 0) {
1121 		fc_trace_free_logq(fcp_logq);
1122 		(void) fc_ulp_remove(&fcp_modinfo);
1123 		mutex_destroy(&fcp_global_mutex);
1124 		mutex_destroy(&fcp_ioctl_mutex);
1125 		ddi_soft_state_fini(&fcp_softstate);
1126 		rval = ENODEV;
1127 	}
1128 
1129 	return (rval);
1130 }
1131 
1132 
1133 /*
1134  * the system is done with us as a driver, so clean up
1135  */
1136 int
1137 _fini(void)
1138 {
1139 	int rval;
1140 
1141 	/*
1142 	 * don't start cleaning up until we know that the module remove
1143 	 * has worked  -- if this works, then we know that each instance
1144 	 * has successfully been DDI_DETACHed
1145 	 */
1146 	if ((rval = mod_remove(&modlinkage)) != 0) {
1147 		return (rval);
1148 	}
1149 
1150 	(void) fc_ulp_remove(&fcp_modinfo);
1151 
1152 	ddi_soft_state_fini(&fcp_softstate);
1153 	mutex_destroy(&fcp_global_mutex);
1154 	mutex_destroy(&fcp_ioctl_mutex);
1155 	fc_trace_free_logq(fcp_logq);
1156 
1157 	return (rval);
1158 }
1159 
1160 
1161 int
1162 _info(struct modinfo *modinfop)
1163 {
1164 	return (mod_info(&modlinkage, modinfop));
1165 }
1166 
1167 
1168 /*
1169  * attach the module
1170  */
1171 static int
1172 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1173 {
1174 	int rval = DDI_SUCCESS;
1175 
1176 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1177 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1178 
1179 	if (cmd == DDI_ATTACH) {
1180 		/* The FCP pseudo device is created here. */
1181 		mutex_enter(&fcp_global_mutex);
1182 		fcp_global_dip = devi;
1183 		mutex_exit(&fcp_global_mutex);
1184 
1185 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1186 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1187 			ddi_report_dev(fcp_global_dip);
1188 		} else {
1189 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1190 			mutex_enter(&fcp_global_mutex);
1191 			fcp_global_dip = NULL;
1192 			mutex_exit(&fcp_global_mutex);
1193 
1194 			rval = DDI_FAILURE;
1195 		}
1196 		/*
1197 		 * We check the fcp_offline_delay property at this
1198 		 * point. This variable is global for the driver,
1199 		 * not specific to an instance.
1200 		 *
1201 		 * We do not recommend setting the value to less
1202 		 * than 10 seconds (RA_TOV_els), or greater than
1203 		 * 60 seconds.
1204 		 */
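		/*
		 * For example, a (hypothetical) fcp.conf entry of
		 * "fcp_offline_delay=30;" would request a 30 second delay.
		 */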
1205 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1206 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1207 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1208 		if ((fcp_offline_delay < 10) ||
1209 		    (fcp_offline_delay > 60)) {
1210 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1211 			    "to %d second(s). This is outside the "
1212 			    "recommended range of 10..60 seconds.",
1213 			    fcp_offline_delay);
1214 		}
1215 	}
1216 
1217 	return (rval);
1218 }
1219 
1220 
1221 /*ARGSUSED*/
1222 static int
1223 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1224 {
1225 	int	res = DDI_SUCCESS;
1226 
1227 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1228 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1229 
1230 	if (cmd == DDI_DETACH) {
1231 		/*
1232 		 * Check if there are active ports/threads. If there
1233 		 * are any, we will fail, else we will succeed (there
1234 		 * should not be much to clean up)
1235 		 */
1236 		mutex_enter(&fcp_global_mutex);
1237 		FCP_DTRACE(fcp_logq, "fcp",
1238 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1239 		    (void *) fcp_port_head);
1240 
1241 		if (fcp_port_head == NULL) {
1242 			ddi_remove_minor_node(fcp_global_dip, NULL);
1243 			fcp_global_dip = NULL;
1244 			mutex_exit(&fcp_global_mutex);
1245 		} else {
1246 			mutex_exit(&fcp_global_mutex);
1247 			res = DDI_FAILURE;
1248 		}
1249 	}
1250 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1251 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1252 
1253 	return (res);
1254 }
1255 
1256 
1257 /* ARGSUSED */
1258 static int
1259 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1260 {
1261 	if (otype != OTYP_CHR) {
1262 		return (EINVAL);
1263 	}
1264 
1265 	/*
1266 	 * Allow only root to open the device.
1267 	 */
1268 	if (drv_priv(credp)) {
1269 		return (EPERM);
1270 	}
1271 
1272 	mutex_enter(&fcp_global_mutex);
1273 	if (fcp_oflag & FCP_EXCL) {
1274 		mutex_exit(&fcp_global_mutex);
1275 		return (EBUSY);
1276 	}
1277 
1278 	if (flag & FEXCL) {
1279 		if (fcp_oflag & FCP_OPEN) {
1280 			mutex_exit(&fcp_global_mutex);
1281 			return (EBUSY);
1282 		}
1283 		fcp_oflag |= FCP_EXCL;
1284 	}
1285 	fcp_oflag |= FCP_OPEN;
1286 	mutex_exit(&fcp_global_mutex);
1287 
1288 	return (0);
1289 }
1290 
1291 
1292 /* ARGSUSED */
1293 static int
1294 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1295 {
1296 	if (otype != OTYP_CHR) {
1297 		return (EINVAL);
1298 	}
1299 
1300 	mutex_enter(&fcp_global_mutex);
1301 	if (!(fcp_oflag & FCP_OPEN)) {
1302 		mutex_exit(&fcp_global_mutex);
1303 		return (ENODEV);
1304 	}
1305 	fcp_oflag = FCP_IDLE;
1306 	mutex_exit(&fcp_global_mutex);
1307 
1308 	return (0);
1309 }
1310 
1311 
1312 /*
1313  * fcp_ioctl
1314  *	Entry point for the FCP ioctls
1315  *
1316  * Input:
1317  *	See ioctl(9E)
1318  *
1319  * Output:
1320  *	See ioctl(9E)
1321  *
1322  * Returns:
1323  *	See ioctl(9E)
1324  *
1325  * Context:
1326  *	Kernel context.
1327  */
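/*
 * A sketch of how a userland consumer is expected to reach this entry
 * point (illustrative only; the device path and values shown here are
 * assumptions, not taken from this file):
 *
 *	int fd = open("/devices/pseudo/fcp@0:fcp", O_RDWR);
 *	struct fcp_ioctl fioctl;
 *
 *	fioctl.fp_minor = instance;	   fp instance of interest
 *	fioctl.listlen	= 1;		   one device_data entry
 *	fioctl.list	= (caddr_t)&dev;   device_data with dev_pwwn set
 *	(void) ioctl(fd, FCP_TGT_INQUIRY, &fioctl);
 */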
1328 /* ARGSUSED */
1329 static int
1330 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1331     int *rval)
1332 {
1333 	int			ret = 0;
1334 
1335 	mutex_enter(&fcp_global_mutex);
1336 	if (!(fcp_oflag & FCP_OPEN)) {
1337 		mutex_exit(&fcp_global_mutex);
1338 		return (ENXIO);
1339 	}
1340 	mutex_exit(&fcp_global_mutex);
1341 
1342 	switch (cmd) {
1343 	case FCP_TGT_INQUIRY:
1344 	case FCP_TGT_CREATE:
1345 	case FCP_TGT_DELETE:
1346 		ret = fcp_setup_device_data_ioctl(cmd,
1347 		    (struct fcp_ioctl *)data, mode, rval);
1348 		break;
1349 
1350 	case FCP_TGT_SEND_SCSI:
1351 		mutex_enter(&fcp_ioctl_mutex);
1352 		ret = fcp_setup_scsi_ioctl(
1353 		    (struct fcp_scsi_cmd *)data, mode, rval);
1354 		mutex_exit(&fcp_ioctl_mutex);
1355 		break;
1356 
1357 	case FCP_STATE_COUNT:
1358 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1359 		    mode, rval);
1360 		break;
1361 	case FCP_GET_TARGET_MAPPINGS:
1362 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1363 		    mode, rval);
1364 		break;
1365 	default:
1366 		fcp_log(CE_WARN, NULL,
1367 		    "!Invalid ioctl opcode = 0x%x", cmd);
1368 		ret	= EINVAL;
1369 	}
1370 
1371 	return (ret);
1372 }
1373 
1374 
1375 /*
1376  * fcp_setup_device_data_ioctl
1377  *	Setup handler for the "device data" style of
1378  *	ioctl for FCP.	See "fcp_util.h" for data structure
1379  *	definition.
1380  *
1381  * Input:
1382  *	cmd	= FCP ioctl command
1383  *	data	= ioctl data
1384  *	mode	= See ioctl(9E)
1385  *
1386  * Output:
1387  *	data	= ioctl data
1388  *	rval	= return value - see ioctl(9E)
1389  *
1390  * Returns:
1391  *	See ioctl(9E)
1392  *
1393  * Context:
1394  *	Kernel context.
1395  */
1396 /* ARGSUSED */
1397 static int
1398 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1399     int *rval)
1400 {
1401 	struct fcp_port	*pptr;
1402 	struct	device_data	*dev_data;
1403 	uint32_t		link_cnt;
1404 	la_wwn_t		*wwn_ptr = NULL;
1405 	struct fcp_tgt		*ptgt = NULL;
1406 	struct fcp_lun		*plun = NULL;
1407 	int			i, error;
1408 	struct fcp_ioctl	fioctl;
1409 
1410 #ifdef	_MULTI_DATAMODEL
1411 	switch (ddi_model_convert_from(mode & FMODELS)) {
1412 	case DDI_MODEL_ILP32: {
1413 		struct fcp32_ioctl f32_ioctl;
1414 
1415 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1416 		    sizeof (struct fcp32_ioctl), mode)) {
1417 			return (EFAULT);
1418 		}
1419 		fioctl.fp_minor = f32_ioctl.fp_minor;
1420 		fioctl.listlen = f32_ioctl.listlen;
1421 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1422 		break;
1423 	}
1424 	case DDI_MODEL_NONE:
1425 		if (ddi_copyin((void *)data, (void *)&fioctl,
1426 		    sizeof (struct fcp_ioctl), mode)) {
1427 			return (EFAULT);
1428 		}
1429 		break;
1430 	}
1431 
1432 #else	/* _MULTI_DATAMODEL */
1433 	if (ddi_copyin((void *)data, (void *)&fioctl,
1434 	    sizeof (struct fcp_ioctl), mode)) {
1435 		return (EFAULT);
1436 	}
1437 #endif	/* _MULTI_DATAMODEL */
1438 
1439 	/*
1440 	 * Right now we can assume that the minor number matches
1441 	 * this instance of fp.  If this changes we will need to
1442 	 * revisit this logic.
1443 	 */
1444 	mutex_enter(&fcp_global_mutex);
1445 	pptr = fcp_port_head;
1446 	while (pptr) {
1447 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1448 			break;
1449 		} else {
1450 			pptr = pptr->port_next;
1451 		}
1452 	}
1453 	mutex_exit(&fcp_global_mutex);
1454 	if (pptr == NULL) {
1455 		return (ENXIO);
1456 	}
1457 	mutex_enter(&pptr->port_mutex);
1458 
1459 
1460 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1461 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1462 		mutex_exit(&pptr->port_mutex);
1463 		return (ENOMEM);
1464 	}
1465 
1466 	if (ddi_copyin(fioctl.list, dev_data,
1467 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1468 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1469 		mutex_exit(&pptr->port_mutex);
1470 		return (EFAULT);
1471 	}
1472 	link_cnt = pptr->port_link_cnt;
1473 
1474 	if (cmd == FCP_TGT_INQUIRY) {
1475 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1476 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1477 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1478 			/* This ioctl is requesting INQ info of local HBA */
1479 			mutex_exit(&pptr->port_mutex);
1480 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1481 			dev_data[0].dev_status = 0;
1482 			if (ddi_copyout(dev_data, fioctl.list,
1483 			    (sizeof (struct device_data)) * fioctl.listlen,
1484 			    mode)) {
1485 				kmem_free(dev_data,
1486 				    sizeof (*dev_data) * fioctl.listlen);
1487 				return (EFAULT);
1488 			}
1489 			kmem_free(dev_data,
1490 			    sizeof (*dev_data) * fioctl.listlen);
1491 #ifdef	_MULTI_DATAMODEL
1492 			switch (ddi_model_convert_from(mode & FMODELS)) {
1493 			case DDI_MODEL_ILP32: {
1494 				struct fcp32_ioctl f32_ioctl;
1495 				f32_ioctl.fp_minor = fioctl.fp_minor;
1496 				f32_ioctl.listlen = fioctl.listlen;
1497 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1498 				if (ddi_copyout((void *)&f32_ioctl,
1499 				    (void *)data,
1500 				    sizeof (struct fcp32_ioctl), mode)) {
1501 					return (EFAULT);
1502 				}
1503 				break;
1504 			}
1505 			case DDI_MODEL_NONE:
1506 				if (ddi_copyout((void *)&fioctl, (void *)data,
1507 				    sizeof (struct fcp_ioctl), mode)) {
1508 					return (EFAULT);
1509 				}
1510 				break;
1511 			}
1512 #else	/* _MULTI_DATAMODEL */
1513 			if (ddi_copyout((void *)&fioctl, (void *)data,
1514 			    sizeof (struct fcp_ioctl), mode)) {
1515 				return (EFAULT);
1516 			}
1517 #endif	/* _MULTI_DATAMODEL */
1518 			return (0);
1519 		}
1520 	}
1521 
1522 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1523 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1524 		mutex_exit(&pptr->port_mutex);
1525 		return (ENXIO);
1526 	}
1527 
1528 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1529 	    i++) {
1530 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1531 
1532 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1533 
1534 
1535 		dev_data[i].dev_status = ENXIO;
1536 
1537 		if ((ptgt = fcp_lookup_target(pptr,
1538 		    (uchar_t *)wwn_ptr)) == NULL) {
1539 			mutex_exit(&pptr->port_mutex);
1540 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1541 			    wwn_ptr, &error, 0) == NULL) {
1542 				dev_data[i].dev_status = ENODEV;
1543 				mutex_enter(&pptr->port_mutex);
1544 				continue;
1545 			} else {
1546 
1547 				dev_data[i].dev_status = EAGAIN;
1548 
1549 				mutex_enter(&pptr->port_mutex);
1550 				continue;
1551 			}
1552 		} else {
1553 			mutex_enter(&ptgt->tgt_mutex);
1554 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1555 			    FCP_TGT_BUSY)) {
1556 				dev_data[i].dev_status = EAGAIN;
1557 				mutex_exit(&ptgt->tgt_mutex);
1558 				continue;
1559 			}
1560 
1561 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1562 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1563 					dev_data[i].dev_status = ENOTSUP;
1564 				} else {
1565 					dev_data[i].dev_status = ENXIO;
1566 				}
1567 				mutex_exit(&ptgt->tgt_mutex);
1568 				continue;
1569 			}
1570 
1571 			switch (cmd) {
1572 			case FCP_TGT_INQUIRY:
1573 				/*
				 * We return the device type of LUN 0 only,
				 * even though in some cases (like maxstrat)
				 * the LUN 0 device type may be 0x3f
				 * (invalid), because for bridge boxes the
				 * targets appear as LUNs and the first LUN
				 * could be a device the utility does not
				 * care about (like a tape device).
1582 				 */
1583 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1584 				dev_data[i].dev_status = 0;
1585 				mutex_exit(&ptgt->tgt_mutex);
1586 
1587 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1588 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1589 				} else {
1590 					dev_data[i].dev0_type = plun->lun_type;
1591 				}
1592 				mutex_enter(&ptgt->tgt_mutex);
1593 				break;
1594 
1595 			case FCP_TGT_CREATE:
1596 				mutex_exit(&ptgt->tgt_mutex);
1597 				mutex_exit(&pptr->port_mutex);
1598 
1599 				/*
				 * Serialize state change callbacks;
				 * only one callback will be handled
				 * at a time.
1603 				 */
1604 				mutex_enter(&fcp_global_mutex);
1605 				if (fcp_oflag & FCP_BUSY) {
1606 					mutex_exit(&fcp_global_mutex);
1607 					if (dev_data) {
1608 						kmem_free(dev_data,
1609 						    sizeof (*dev_data) *
1610 						    fioctl.listlen);
1611 					}
1612 					return (EBUSY);
1613 				}
1614 				fcp_oflag |= FCP_BUSY;
1615 				mutex_exit(&fcp_global_mutex);
1616 
1617 				dev_data[i].dev_status =
1618 				    fcp_create_on_demand(pptr,
1619 				    wwn_ptr->raw_wwn);
1620 
1621 				if (dev_data[i].dev_status != 0) {
1622 					char	buf[25];
1623 
1624 					for (i = 0; i < FC_WWN_SIZE; i++) {
1625 						(void) sprintf(&buf[i << 1],
1626 						    "%02x",
1627 						    wwn_ptr->raw_wwn[i]);
1628 					}
1629 
1630 					fcp_log(CE_WARN, pptr->port_dip,
1631 					    "!Failed to create nodes for"
1632 					    " pwwn=%s; error=%x", buf,
1633 					    dev_data[i].dev_status);
1634 				}
1635 
				/* allow state change callbacks again */
1637 				mutex_enter(&fcp_global_mutex);
1638 				fcp_oflag &= ~FCP_BUSY;
1639 				mutex_exit(&fcp_global_mutex);
1640 
1641 				mutex_enter(&pptr->port_mutex);
1642 				mutex_enter(&ptgt->tgt_mutex);
1643 
1644 				break;
1645 
1646 			case FCP_TGT_DELETE:
1647 				break;
1648 
1649 			default:
1650 				fcp_log(CE_WARN, pptr->port_dip,
1651 				    "!Invalid device data ioctl "
1652 				    "opcode = 0x%x", cmd);
1653 			}
1654 			mutex_exit(&ptgt->tgt_mutex);
1655 		}
1656 	}
1657 	mutex_exit(&pptr->port_mutex);
1658 
1659 	if (ddi_copyout(dev_data, fioctl.list,
1660 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1661 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1662 		return (EFAULT);
1663 	}
1664 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1665 
1666 #ifdef	_MULTI_DATAMODEL
1667 	switch (ddi_model_convert_from(mode & FMODELS)) {
1668 	case DDI_MODEL_ILP32: {
1669 		struct fcp32_ioctl f32_ioctl;
1670 
1671 		f32_ioctl.fp_minor = fioctl.fp_minor;
1672 		f32_ioctl.listlen = fioctl.listlen;
1673 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1674 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1675 		    sizeof (struct fcp32_ioctl), mode)) {
1676 			return (EFAULT);
1677 		}
1678 		break;
1679 	}
1680 	case DDI_MODEL_NONE:
1681 		if (ddi_copyout((void *)&fioctl, (void *)data,
1682 		    sizeof (struct fcp_ioctl), mode)) {
1683 			return (EFAULT);
1684 		}
1685 		break;
1686 	}
1687 #else	/* _MULTI_DATAMODEL */
1688 
1689 	if (ddi_copyout((void *)&fioctl, (void *)data,
1690 	    sizeof (struct fcp_ioctl), mode)) {
1691 		return (EFAULT);
1692 	}
1693 #endif	/* _MULTI_DATAMODEL */
1694 
1695 	return (0);
1696 }
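
/*
 * Illustrative sketch (not part of the driver): how a user-land utility
 * might drive the device-data ioctl handled above.  The device node path
 * and header names are assumptions; the fcp_ioctl and device_data fields
 * (fp_minor, listlen, list, dev_pwwn, dev_status, dev0_type) and the
 * FCP_TGT_INQUIRY command match the handler above.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/fibre-channel/ulp/fcp_util.h>
 *
 *	int
 *	query_target(int fp_minor, const unsigned char pwwn[8])
 *	{
 *		struct device_data	dev;
 *		struct fcp_ioctl	fioctl;
 *		int			fd;
 *
 *		if ((fd = open("/dev/fcp", O_RDONLY)) < 0)
 *			return (-1);
 *
 *		(void) memset(&dev, 0, sizeof (dev));
 *		(void) memcpy(&dev.dev_pwwn, pwwn, 8);
 *
 *		fioctl.fp_minor = fp_minor;
 *		fioctl.listlen	= 1;
 *		fioctl.list	= (caddr_t)&dev;
 *
 *		if (ioctl(fd, FCP_TGT_INQUIRY, &fioctl) < 0) {
 *			(void) close(fd);
 *			return (-1);
 *		}
 *		(void) printf("dev_status=%d dev0_type=0x%x\n",
 *		    dev.dev_status, dev.dev0_type);
 *		(void) close(fd);
 *		return (0);
 *	}
 */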
1697 
1698 /*
1699  * Fetch the target mappings (path, etc.) for all LUNs
1700  * on this port.
1701  */
1702 /* ARGSUSED */
1703 static int
1704 fcp_get_target_mappings(struct fcp_ioctl *data,
1705     int mode, int *rval)
1706 {
1707 	struct fcp_port	    *pptr;
1708 	fc_hba_target_mappings_t    *mappings;
1709 	fc_hba_mapping_entry_t	    *map;
1710 	struct fcp_tgt	    *ptgt = NULL;
1711 	struct fcp_lun	    *plun = NULL;
1712 	int			    i, mapIndex, mappingSize;
1713 	int			    listlen;
1714 	struct fcp_ioctl	    fioctl;
1715 	char			    *path;
1716 	fcp_ent_addr_t		    sam_lun_addr;
1717 
1718 #ifdef	_MULTI_DATAMODEL
1719 	switch (ddi_model_convert_from(mode & FMODELS)) {
1720 	case DDI_MODEL_ILP32: {
1721 		struct fcp32_ioctl f32_ioctl;
1722 
1723 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1724 		    sizeof (struct fcp32_ioctl), mode)) {
1725 			return (EFAULT);
1726 		}
1727 		fioctl.fp_minor = f32_ioctl.fp_minor;
1728 		fioctl.listlen = f32_ioctl.listlen;
1729 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1730 		break;
1731 	}
1732 	case DDI_MODEL_NONE:
1733 		if (ddi_copyin((void *)data, (void *)&fioctl,
1734 		    sizeof (struct fcp_ioctl), mode)) {
1735 			return (EFAULT);
1736 		}
1737 		break;
1738 	}
1739 
1740 #else	/* _MULTI_DATAMODEL */
1741 	if (ddi_copyin((void *)data, (void *)&fioctl,
1742 	    sizeof (struct fcp_ioctl), mode)) {
1743 		return (EFAULT);
1744 	}
1745 #endif	/* _MULTI_DATAMODEL */
1746 
1747 	/*
1748 	 * Right now we can assume that the minor number matches with
1749 	 * this instance of fp. If this changes we will need to
1750 	 * revisit this logic.
1751 	 */
1752 	mutex_enter(&fcp_global_mutex);
1753 	pptr = fcp_port_head;
1754 	while (pptr) {
1755 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1756 			break;
1757 		} else {
1758 			pptr = pptr->port_next;
1759 		}
1760 	}
1761 	mutex_exit(&fcp_global_mutex);
1762 	if (pptr == NULL) {
1763 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1764 		    fioctl.fp_minor);
1765 		return (ENXIO);
1766 	}
1767 
1768 
1769 	/* We use listlen to show the total buffer size */
1770 	mappingSize = fioctl.listlen;
1771 
1772 	/* Now calculate how many mapping entries will fit */
1773 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1774 	    - sizeof (fc_hba_target_mappings_t);
1775 	if (listlen <= 0) {
1776 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1777 		return (ENXIO);
1778 	}
1779 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
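	/*
	 * I.e. entries = (buffer size - (sizeof (header) - sizeof (entry)))
	 * / sizeof (entry); one entry's worth of the header is given back,
	 * presumably because fc_hba_target_mappings_t already embeds the
	 * first fc_hba_mapping_entry_t in its entries[] array.
	 */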
1780 
1781 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1782 		return (ENOMEM);
1783 	}
1784 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1785 
1786 	/* Now get to work */
1787 	mapIndex = 0;
1788 
1789 	mutex_enter(&pptr->port_mutex);
1790 	/* Loop through all targets on this port */
1791 	for (i = 0; i < FCP_NUM_HASH; i++) {
1792 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1793 		    ptgt = ptgt->tgt_next) {
1794 
1795 
1796 			/* Loop through all LUNs on this target */
1797 			for (plun = ptgt->tgt_lun; plun != NULL;
1798 			    plun = plun->lun_next) {
1799 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1800 					continue;
1801 				}
1802 
1803 				path = fcp_get_lun_path(plun);
1804 				if (path == NULL) {
1805 					continue;
1806 				}
1807 
1808 				if (mapIndex >= listlen) {
					mapIndex++;
1810 					kmem_free(path, MAXPATHLEN);
1811 					continue;
1812 				}
1813 				map = &mappings->entries[mapIndex++];
1814 				bcopy(path, map->targetDriver,
1815 				    sizeof (map->targetDriver));
1816 				map->d_id = ptgt->tgt_d_id;
1817 				map->busNumber = 0;
1818 				map->targetNumber = ptgt->tgt_d_id;
1819 				map->osLUN = plun->lun_num;
1820 
1821 				/*
1822 				 * We had swapped lun when we stored it in
1823 				 * lun_addr. We need to swap it back before
1824 				 * returning it to user land
1825 				 */
1826 
1827 				sam_lun_addr.ent_addr_0 =
1828 				    BE_16(plun->lun_addr.ent_addr_0);
1829 				sam_lun_addr.ent_addr_1 =
1830 				    BE_16(plun->lun_addr.ent_addr_1);
1831 				sam_lun_addr.ent_addr_2 =
1832 				    BE_16(plun->lun_addr.ent_addr_2);
1833 				sam_lun_addr.ent_addr_3 =
1834 				    BE_16(plun->lun_addr.ent_addr_3);
1835 
1836 				bcopy(&sam_lun_addr, &map->samLUN,
1837 				    FCP_LUN_SIZE);
1838 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1839 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1840 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1841 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1842 
1843 				if (plun->lun_guid) {
1844 
1845 					/* convert ascii wwn to bytes */
1846 					fcp_ascii_to_wwn(plun->lun_guid,
1847 					    map->guid, sizeof (map->guid));
1848 
1849 					if ((sizeof (map->guid)) <
1850 					    plun->lun_guid_size / 2) {
1851 						cmn_err(CE_WARN,
1852 						    "fcp_get_target_mappings:"
						    " guid copy space "
						    "insufficient. "
						    "Copy truncation - "
1856 						    "available %d; need %d",
1857 						    (int)sizeof (map->guid),
1858 						    (int)
1859 						    plun->lun_guid_size / 2);
1860 					}
1861 				}
1862 				kmem_free(path, MAXPATHLEN);
1863 			}
1864 		}
1865 	}
1866 	mutex_exit(&pptr->port_mutex);
1867 	mappings->numLuns = mapIndex;
1868 
1869 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1870 		kmem_free(mappings, mappingSize);
1871 		return (EFAULT);
1872 	}
1873 	kmem_free(mappings, mappingSize);
1874 
1875 #ifdef	_MULTI_DATAMODEL
1876 	switch (ddi_model_convert_from(mode & FMODELS)) {
1877 	case DDI_MODEL_ILP32: {
1878 		struct fcp32_ioctl f32_ioctl;
1879 
1880 		f32_ioctl.fp_minor = fioctl.fp_minor;
1881 		f32_ioctl.listlen = fioctl.listlen;
1882 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1883 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1884 		    sizeof (struct fcp32_ioctl), mode)) {
1885 			return (EFAULT);
1886 		}
1887 		break;
1888 	}
1889 	case DDI_MODEL_NONE:
1890 		if (ddi_copyout((void *)&fioctl, (void *)data,
1891 		    sizeof (struct fcp_ioctl), mode)) {
1892 			return (EFAULT);
1893 		}
1894 		break;
1895 	}
1896 #else	/* _MULTI_DATAMODEL */
1897 
1898 	if (ddi_copyout((void *)&fioctl, (void *)data,
1899 	    sizeof (struct fcp_ioctl), mode)) {
1900 		return (EFAULT);
1901 	}
1902 #endif	/* _MULTI_DATAMODEL */
1903 
1904 	return (0);
1905 }
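
/*
 * Caller-side note (illustrative, not part of the driver): since mapIndex
 * keeps counting above even after the supplied buffer is full, the numLuns
 * value returned can exceed the number of entries that actually fit in
 * fioctl.list.  A user-land caller can use this to detect truncation and
 * retry, for example (assuming, as above, that the mappings header embeds
 * the first entry):
 *
 *	if (mappings->numLuns > entries_supplied) {
 *		newlen = sizeof (fc_hba_target_mappings_t) +
 *		    (mappings->numLuns - 1) *
 *		    sizeof (fc_hba_mapping_entry_t);
 *		-- reallocate the buffer to newlen bytes and reissue
 *		-- the ioctl with fioctl.listlen = newlen
 *	}
 */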
1906 
1907 /*
1908  * fcp_setup_scsi_ioctl
1909  *	Setup handler for the "scsi passthru" style of
1910  *	ioctl for FCP.	See "fcp_util.h" for data structure
1911  *	definition.
1912  *
1913  * Input:
1914  *	u_fscsi	= ioctl data (user address space)
1915  *	mode	= See ioctl(9E)
1916  *
1917  * Output:
1918  *	u_fscsi	= ioctl data (user address space)
1919  *	rval	= return value - see ioctl(9E)
1920  *
1921  * Returns:
1922  *	0	= OK
1923  *	EAGAIN	= See errno.h
1924  *	EBUSY	= See errno.h
1925  *	EFAULT	= See errno.h
1926  *	EINTR	= See errno.h
1927  *	EINVAL	= See errno.h
1928  *	EIO	= See errno.h
1929  *	ENOMEM	= See errno.h
1930  *	ENXIO	= See errno.h
1931  *
1932  * Context:
1933  *	Kernel context.
1934  */
1935 /* ARGSUSED */
1936 static int
1937 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1938     int mode, int *rval)
1939 {
1940 	int			ret		= 0;
1941 	int			temp_ret;
1942 	caddr_t			k_cdbbufaddr	= NULL;
1943 	caddr_t			k_bufaddr	= NULL;
1944 	caddr_t			k_rqbufaddr	= NULL;
1945 	caddr_t			u_cdbbufaddr;
1946 	caddr_t			u_bufaddr;
1947 	caddr_t			u_rqbufaddr;
1948 	struct fcp_scsi_cmd	k_fscsi;
1949 
1950 	/*
1951 	 * Get fcp_scsi_cmd array element from user address space
1952 	 */
1953 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1954 	    != 0) {
1955 		return (ret);
1956 	}
1957 
1958 
1959 	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed in case kmem flags
	 * are set and a zero buffer length is passed.
1963 	 */
1964 	if ((k_fscsi.scsi_cdblen <= 0) ||
1965 	    (k_fscsi.scsi_buflen <= 0) ||
1966 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
1967 	    (k_fscsi.scsi_rqlen <= 0) ||
1968 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
1969 		return (EINVAL);
1970 	}
1971 
1972 	/*
1973 	 * Allocate data for fcp_scsi_cmd pointer fields
1974 	 */
1975 	if (ret == 0) {
1976 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
1977 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
1978 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
1979 
1980 		if (k_cdbbufaddr == NULL ||
1981 		    k_bufaddr	 == NULL ||
1982 		    k_rqbufaddr	 == NULL) {
1983 			ret = ENOMEM;
1984 		}
1985 	}
1986 
1987 	/*
1988 	 * Get fcp_scsi_cmd pointer fields from user
1989 	 * address space
1990 	 */
1991 	if (ret == 0) {
1992 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
1993 		u_bufaddr    = k_fscsi.scsi_bufaddr;
1994 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
1995 
1996 		if (ddi_copyin(u_cdbbufaddr,
1997 		    k_cdbbufaddr,
1998 		    k_fscsi.scsi_cdblen,
1999 		    mode)) {
2000 			ret = EFAULT;
2001 		} else if (ddi_copyin(u_bufaddr,
2002 		    k_bufaddr,
2003 		    k_fscsi.scsi_buflen,
2004 		    mode)) {
2005 			ret = EFAULT;
2006 		} else if (ddi_copyin(u_rqbufaddr,
2007 		    k_rqbufaddr,
2008 		    k_fscsi.scsi_rqlen,
2009 		    mode)) {
2010 			ret = EFAULT;
2011 		}
2012 	}
2013 
2014 	/*
2015 	 * Send scsi command (blocking)
2016 	 */
2017 	if (ret == 0) {
2018 		/*
2019 		 * Prior to sending the scsi command, the
2020 		 * fcp_scsi_cmd data structure must contain kernel,
2021 		 * not user, addresses.
2022 		 */
2023 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2024 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2025 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2026 
2027 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2028 
2029 		/*
2030 		 * After sending the scsi command, the
2031 		 * fcp_scsi_cmd data structure must contain user,
2032 		 * not kernel, addresses.
2033 		 */
2034 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2035 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2036 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2037 	}
2038 
2039 	/*
2040 	 * Put fcp_scsi_cmd pointer fields to user address space
2041 	 */
2042 	if (ret == 0) {
2043 		if (ddi_copyout(k_cdbbufaddr,
2044 		    u_cdbbufaddr,
2045 		    k_fscsi.scsi_cdblen,
2046 		    mode)) {
2047 			ret = EFAULT;
2048 		} else if (ddi_copyout(k_bufaddr,
2049 		    u_bufaddr,
2050 		    k_fscsi.scsi_buflen,
2051 		    mode)) {
2052 			ret = EFAULT;
2053 		} else if (ddi_copyout(k_rqbufaddr,
2054 		    u_rqbufaddr,
2055 		    k_fscsi.scsi_rqlen,
2056 		    mode)) {
2057 			ret = EFAULT;
2058 		}
2059 	}
2060 
2061 	/*
2062 	 * Free data for fcp_scsi_cmd pointer fields
2063 	 */
2064 	if (k_cdbbufaddr != NULL) {
2065 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2066 	}
2067 	if (k_bufaddr != NULL) {
2068 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2069 	}
2070 	if (k_rqbufaddr != NULL) {
2071 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2072 	}
2073 
2074 	/*
2075 	 * Put fcp_scsi_cmd array element to user address space
2076 	 */
2077 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2078 	if (temp_ret != 0) {
2079 		ret = temp_ret;
2080 	}
2081 
2082 	/*
2083 	 * Return status
2084 	 */
2085 	return (ret);
2086 }
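
/*
 * Illustrative sketch (not part of the driver): a user-land caller of the
 * passthru ioctl set up above, issuing a SCSI INQUIRY (opcode 0x12) to
 * LUN 0 of a target.  The ioctl request name FCP_TGT_SEND_SCSI, the file
 * descriptor setup, and the timeout unit are assumptions; the fcp_scsi_cmd
 * fields shown match the ones used by fcp_setup_scsi_ioctl() and
 * fcp_send_scsi_ioctl().
 *
 *	struct fcp_scsi_cmd	fscsi;
 *	unsigned char		cdb[6] = { 0x12, 0, 0, 0, 0xff, 0 };
 *	unsigned char		inq[0xff];
 *	unsigned char		sense[252];
 *
 *	(void) memset(&fscsi, 0, sizeof (fscsi));
 *	fscsi.scsi_fc_port_num	= port_instance;
 *	(void) memcpy(&fscsi.scsi_fc_pwwn, pwwn, 8);
 *	fscsi.scsi_lun		= 0;
 *	fscsi.scsi_flags	= FCP_SCSI_READ;
 *	fscsi.scsi_timeout	= 30;
 *	fscsi.scsi_cdbbufaddr	= (caddr_t)cdb;
 *	fscsi.scsi_cdblen	= sizeof (cdb);
 *	fscsi.scsi_bufaddr	= (caddr_t)inq;
 *	fscsi.scsi_buflen	= sizeof (inq);
 *	fscsi.scsi_rqbufaddr	= (caddr_t)sense;
 *	fscsi.scsi_rqlen	= sizeof (sense);
 *
 *	if (ioctl(fd, FCP_TGT_SEND_SCSI, &fscsi) == 0 &&
 *	    fscsi.scsi_bufstatus == 0) {
 *		-- inq[] now holds scsi_buflen - scsi_bufresid
 *		-- valid bytes of INQUIRY data
 *	}
 */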
2087 
2088 
2089 /*
2090  * fcp_copyin_scsi_cmd
2091  *	Copy in fcp_scsi_cmd data structure from user address space.
2092  *	The data may be in 32 bit or 64 bit modes.
2093  *
2094  * Input:
2095  *	base_addr	= from address (user address space)
2096  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2097  *
2098  * Output:
2099  *	fscsi		= to address (kernel address space)
2100  *
2101  * Returns:
2102  *	0	= OK
2103  *	EFAULT	= Error
2104  *
2105  * Context:
2106  *	Kernel context.
2107  */
2108 static int
2109 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2110 {
2111 #ifdef	_MULTI_DATAMODEL
2112 	struct fcp32_scsi_cmd	f32scsi;
2113 
2114 	switch (ddi_model_convert_from(mode & FMODELS)) {
2115 	case DDI_MODEL_ILP32:
2116 		/*
2117 		 * Copy data from user address space
2118 		 */
2119 		if (ddi_copyin((void *)base_addr,
2120 		    &f32scsi,
2121 		    sizeof (struct fcp32_scsi_cmd),
2122 		    mode)) {
2123 			return (EFAULT);
2124 		}
2125 		/*
2126 		 * Convert from 32 bit to 64 bit
2127 		 */
2128 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2129 		break;
2130 	case DDI_MODEL_NONE:
2131 		/*
2132 		 * Copy data from user address space
2133 		 */
2134 		if (ddi_copyin((void *)base_addr,
2135 		    fscsi,
2136 		    sizeof (struct fcp_scsi_cmd),
2137 		    mode)) {
2138 			return (EFAULT);
2139 		}
2140 		break;
2141 	}
2142 #else	/* _MULTI_DATAMODEL */
2143 	/*
2144 	 * Copy data from user address space
2145 	 */
2146 	if (ddi_copyin((void *)base_addr,
2147 	    fscsi,
2148 	    sizeof (struct fcp_scsi_cmd),
2149 	    mode)) {
2150 		return (EFAULT);
2151 	}
2152 #endif	/* _MULTI_DATAMODEL */
2153 
2154 	return (0);
2155 }
2156 
2157 
2158 /*
2159  * fcp_copyout_scsi_cmd
2160  *	Copy out fcp_scsi_cmd data structure to user address space.
2161  *	The data may be in 32 bit or 64 bit modes.
2162  *
2163  * Input:
2164  *	fscsi		= to address (kernel address space)
2165  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2166  *
2167  * Output:
2168  *	base_addr	= from address (user address space)
2169  *
2170  * Returns:
2171  *	0	= OK
2172  *	EFAULT	= Error
2173  *
2174  * Context:
2175  *	Kernel context.
2176  */
2177 static int
2178 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2179 {
2180 #ifdef	_MULTI_DATAMODEL
2181 	struct fcp32_scsi_cmd	f32scsi;
2182 
2183 	switch (ddi_model_convert_from(mode & FMODELS)) {
2184 	case DDI_MODEL_ILP32:
2185 		/*
2186 		 * Convert from 64 bit to 32 bit
2187 		 */
2188 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2189 		/*
2190 		 * Copy data to user address space
2191 		 */
2192 		if (ddi_copyout(&f32scsi,
2193 		    (void *)base_addr,
2194 		    sizeof (struct fcp32_scsi_cmd),
2195 		    mode)) {
2196 			return (EFAULT);
2197 		}
2198 		break;
2199 	case DDI_MODEL_NONE:
2200 		/*
2201 		 * Copy data to user address space
2202 		 */
2203 		if (ddi_copyout(fscsi,
2204 		    (void *)base_addr,
2205 		    sizeof (struct fcp_scsi_cmd),
2206 		    mode)) {
2207 			return (EFAULT);
2208 		}
2209 		break;
2210 	}
2211 #else	/* _MULTI_DATAMODEL */
2212 	/*
2213 	 * Copy data to user address space
2214 	 */
2215 	if (ddi_copyout(fscsi,
2216 	    (void *)base_addr,
2217 	    sizeof (struct fcp_scsi_cmd),
2218 	    mode)) {
2219 		return (EFAULT);
2220 	}
2221 #endif	/* _MULTI_DATAMODEL */
2222 
2223 	return (0);
2224 }
2225 
2226 
2227 /*
2228  * fcp_send_scsi_ioctl
2229  *	Sends the SCSI command in blocking mode.
2230  *
2231  * Input:
2232  *	fscsi		= SCSI command data structure
2233  *
2234  * Output:
2235  *	fscsi		= SCSI command data structure
2236  *
2237  * Returns:
2238  *	0	= OK
2239  *	EAGAIN	= See errno.h
2240  *	EBUSY	= See errno.h
2241  *	EINTR	= See errno.h
2242  *	EINVAL	= See errno.h
2243  *	EIO	= See errno.h
2244  *	ENOMEM	= See errno.h
2245  *	ENXIO	= See errno.h
2246  *
2247  * Context:
2248  *	Kernel context.
2249  */
2250 static int
2251 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2252 {
2253 	struct fcp_lun	*plun		= NULL;
2254 	struct fcp_port	*pptr		= NULL;
2255 	struct fcp_tgt	*ptgt		= NULL;
2256 	fc_packet_t		*fpkt		= NULL;
2257 	struct fcp_ipkt	*icmd		= NULL;
2258 	int			target_created	= FALSE;
2259 	fc_frame_hdr_t		*hp;
2260 	struct fcp_cmd		fcp_cmd;
2261 	struct fcp_cmd		*fcmd;
2262 	union scsi_cdb		*scsi_cdb;
2263 	la_wwn_t		*wwn_ptr;
2264 	int			nodma;
2265 	struct fcp_rsp		*rsp;
2266 	struct fcp_rsp_info	*rsp_info;
2267 	caddr_t			rsp_sense;
2268 	int			buf_len;
2269 	int			info_len;
2270 	int			sense_len;
2271 	struct scsi_extended_sense	*sense_to = NULL;
2272 	timeout_id_t		tid;
2273 	uint8_t			reconfig_lun = FALSE;
2274 	uint8_t			reconfig_pending = FALSE;
2275 	uint8_t			scsi_cmd;
2276 	int			rsp_len;
2277 	int			cmd_index;
2278 	int			fc_status;
2279 	int			pkt_state;
2280 	int			pkt_action;
2281 	int			pkt_reason;
2282 	int			ret, xport_retval = ~FC_SUCCESS;
2283 	int			lcount;
2284 	int			tcount;
2285 	int			reconfig_status;
2286 	int			port_busy = FALSE;
2287 	uchar_t			*lun_string;
2288 
2289 	/*
2290 	 * Check valid SCSI command
2291 	 */
2292 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2293 	ret = EINVAL;
2294 	for (cmd_index = 0;
2295 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2296 	    ret != 0;
2297 	    cmd_index++) {
2298 		/*
2299 		 * First byte of CDB is the SCSI command
2300 		 */
2301 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2302 			ret = 0;
2303 		}
2304 	}
2305 
2306 	/*
2307 	 * Check inputs
2308 	 */
2309 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2310 		ret = EINVAL;
2311 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2312 		/* no larger than */
2313 		ret = EINVAL;
2314 	}
2315 
2316 
2317 	/*
2318 	 * Find FC port
2319 	 */
2320 	if (ret == 0) {
2321 		/*
2322 		 * Acquire global mutex
2323 		 */
2324 		mutex_enter(&fcp_global_mutex);
2325 
2326 		pptr = fcp_port_head;
2327 		while (pptr) {
2328 			if (pptr->port_instance ==
2329 			    (uint32_t)fscsi->scsi_fc_port_num) {
2330 				break;
2331 			} else {
2332 				pptr = pptr->port_next;
2333 			}
2334 		}
2335 
2336 		if (pptr == NULL) {
2337 			ret = ENXIO;
2338 		} else {
2339 			/*
			 * fc_ulp_busy_port can raise power, so we must not
			 * hold any mutexes involved in PM.
2342 			 */
2343 			mutex_exit(&fcp_global_mutex);
2344 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2345 		}
2346 
2347 		if (ret == 0) {
2348 
2349 			/* remember port is busy, so we will release later */
2350 			port_busy = TRUE;
2351 
2352 			/*
2353 			 * If there is a reconfiguration in progress, wait
2354 			 * for it to complete.
2355 			 */
2356 
2357 			fcp_reconfig_wait(pptr);
2358 
2359 			/* reacquire mutexes in order */
2360 			mutex_enter(&fcp_global_mutex);
2361 			mutex_enter(&pptr->port_mutex);
2362 
2363 			/*
2364 			 * Will port accept DMA?
2365 			 */
2366 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2367 			    ? 1 : 0;
2368 
2369 			/*
			 * If the port is initializing or offline, the
			 * device is not known.
			 *
			 * If we are still discovering (onlining), we
			 * obviously cannot provide reliable data about
			 * devices until discovery is complete.
2375 			 */
2376 			if (pptr->port_state &	  (FCP_STATE_INIT |
2377 			    FCP_STATE_OFFLINE)) {
2378 				ret = ENXIO;
2379 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2380 				ret = EBUSY;
2381 			} else {
2382 				/*
2383 				 * Find target from pwwn
2384 				 *
2385 				 * The wwn must be put into a local
2386 				 * variable to ensure alignment.
2387 				 */
2388 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2389 				ptgt = fcp_lookup_target(pptr,
2390 				    (uchar_t *)wwn_ptr);
2391 
2392 				/*
				 * If the target was not found, try to
				 * create it.
2394 				 */
2395 				if (ptgt == NULL) {
2396 					/*
2397 					 * Note: Still have global &
2398 					 * port mutexes
2399 					 */
2400 					mutex_exit(&pptr->port_mutex);
2401 					ptgt = fcp_port_create_tgt(pptr,
2402 					    wwn_ptr, &ret, &fc_status,
2403 					    &pkt_state, &pkt_action,
2404 					    &pkt_reason);
2405 					mutex_enter(&pptr->port_mutex);
2406 
2407 					fscsi->scsi_fc_status  = fc_status;
2408 					fscsi->scsi_pkt_state  =
2409 					    (uchar_t)pkt_state;
2410 					fscsi->scsi_pkt_reason = pkt_reason;
2411 					fscsi->scsi_pkt_action =
2412 					    (uchar_t)pkt_action;
2413 
2414 					if (ptgt != NULL) {
2415 						target_created = TRUE;
2416 					} else if (ret == 0) {
2417 						ret = ENOMEM;
2418 					}
2419 				}
2420 
2421 				if (ret == 0) {
2422 					/*
2423 					 * Acquire target
2424 					 */
2425 					mutex_enter(&ptgt->tgt_mutex);
2426 
2427 					/*
					 * If the target is marked or busy,
					 * it cannot be used.
2430 					 */
2431 					if (ptgt->tgt_state &
2432 					    (FCP_TGT_MARK |
2433 					    FCP_TGT_BUSY)) {
2434 						ret = EBUSY;
2435 					} else {
2436 						/*
2437 						 * Mark target as busy
2438 						 */
2439 						ptgt->tgt_state |=
2440 						    FCP_TGT_BUSY;
2441 					}
2442 
2443 					/*
2444 					 * Release target
2445 					 */
2446 					lcount = pptr->port_link_cnt;
2447 					tcount = ptgt->tgt_change_cnt;
2448 					mutex_exit(&ptgt->tgt_mutex);
2449 				}
2450 			}
2451 
2452 			/*
2453 			 * Release port
2454 			 */
2455 			mutex_exit(&pptr->port_mutex);
2456 		}
2457 
2458 		/*
2459 		 * Release global mutex
2460 		 */
2461 		mutex_exit(&fcp_global_mutex);
2462 	}
2463 
2464 	if (ret == 0) {
2465 		uint64_t belun = BE_64(fscsi->scsi_lun);
2466 
2467 		/*
		 * If it's a target device, find the LUN from the pwwn.
		 * The wwn must be put into a local variable to ensure
		 * alignment.
2471 		 */
2472 		mutex_enter(&pptr->port_mutex);
2473 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2474 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2475 			/* this is not a target */
2476 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2477 			ret = ENXIO;
2478 		} else if ((belun << 16) != 0) {
2479 			/*
			 * Since fcp only supports the PD and LU addressing
			 * methods so far, the last 6 bytes of a valid LUN
			 * are expected to be filled with 00h.
2483 			 */
2484 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2485 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2486 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2487 			    (uint8_t)(belun >> 62), belun);
2488 			ret = ENXIO;
2489 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2490 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2491 			/*
2492 			 * This is a SCSI target, but no LUN at this
2493 			 * address.
2494 			 *
2495 			 * In the future, we may want to send this to
2496 			 * the target, and let it respond
2497 			 * appropriately
2498 			 */
2499 			ret = ENXIO;
2500 		}
2501 		mutex_exit(&pptr->port_mutex);
2502 	}
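
	/*
	 * Layout recap for the checks above: after BE_64(), the first
	 * (most significant) 16 bits of belun hold the first level of
	 * the 8-byte SCSI LUN.  Bits 63-62 are the addressing method
	 * (logged in the message above), bits 61-48 are the LUN number
	 * extracted by (belun >> 48) & 0x3fff, and (belun << 16) != 0
	 * means one of the three lower LUN levels is non-zero, which
	 * fcp does not support.  For example, LUN 5 in peripheral
	 * device addressing is the byte string 00 05 00 00 00 00 00 00,
	 * i.e. belun == 0x0005000000000000 and a LUN number of 5.
	 */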
2503 
2504 	/*
2505 	 * Finished grabbing external resources
2506 	 * Allocate internal packet (icmd)
2507 	 */
2508 	if (ret == 0) {
2509 		/*
2510 		 * Calc rsp len assuming rsp info included
2511 		 */
2512 		rsp_len = sizeof (struct fcp_rsp) +
2513 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2514 
2515 		icmd = fcp_icmd_alloc(pptr, ptgt,
2516 		    sizeof (struct fcp_cmd),
2517 		    rsp_len,
2518 		    fscsi->scsi_buflen,
2519 		    nodma,
2520 		    lcount,			/* ipkt_link_cnt */
2521 		    tcount,			/* ipkt_change_cnt */
2522 		    0,				/* cause */
2523 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2524 
2525 		if (icmd == NULL) {
2526 			ret = ENOMEM;
2527 		} else {
2528 			/*
2529 			 * Setup internal packet as sema sync
2530 			 */
2531 			fcp_ipkt_sema_init(icmd);
2532 		}
2533 	}
2534 
2535 	if (ret == 0) {
2536 		/*
2537 		 * Init fpkt pointer for use.
2538 		 */
2539 
2540 		fpkt = icmd->ipkt_fpkt;
2541 
2542 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2543 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2544 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2545 
2546 		/*
2547 		 * Init fcmd pointer for use by SCSI command
2548 		 */
2549 
2550 		if (nodma) {
2551 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2552 		} else {
2553 			fcmd = &fcp_cmd;
2554 		}
2555 		bzero(fcmd, sizeof (struct fcp_cmd));
2556 		ptgt = plun->lun_tgt;
2557 
2558 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2559 
2560 		fcmd->fcp_ent_addr.ent_addr_0 =
2561 		    BE_16(*(uint16_t *)&(lun_string[0]));
2562 		fcmd->fcp_ent_addr.ent_addr_1 =
2563 		    BE_16(*(uint16_t *)&(lun_string[2]));
2564 		fcmd->fcp_ent_addr.ent_addr_2 =
2565 		    BE_16(*(uint16_t *)&(lun_string[4]));
2566 		fcmd->fcp_ent_addr.ent_addr_3 =
2567 		    BE_16(*(uint16_t *)&(lun_string[6]));
2568 
2569 		/*
2570 		 * Setup internal packet(icmd)
2571 		 */
2572 		icmd->ipkt_lun		= plun;
2573 		icmd->ipkt_restart	= 0;
2574 		icmd->ipkt_retries	= 0;
2575 		icmd->ipkt_opcode	= 0;
2576 
2577 		/*
		 * Init the frame header pointer for use.
2579 		 */
2580 		hp = &fpkt->pkt_cmd_fhdr;
2581 
2582 		hp->s_id	= pptr->port_id;
2583 		hp->d_id	= ptgt->tgt_d_id;
2584 		hp->r_ctl	= R_CTL_COMMAND;
2585 		hp->type	= FC_TYPE_SCSI_FCP;
2586 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2587 		hp->rsvd	= 0;
2588 		hp->seq_id	= 0;
2589 		hp->seq_cnt	= 0;
2590 		hp->ox_id	= 0xffff;
2591 		hp->rx_id	= 0xffff;
2592 		hp->ro		= 0;
2593 
2594 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2595 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2596 		fcmd->fcp_cntl.cntl_write_data	= 0;
2597 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2598 
2599 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2600 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2601 		    fscsi->scsi_cdblen);
2602 
2603 		if (!nodma) {
2604 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2605 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2606 		}
2607 
2608 		/*
2609 		 * Send SCSI command to FC transport
2610 		 */
2611 
2612 		if (ret == 0) {
2613 			mutex_enter(&ptgt->tgt_mutex);
2614 
2615 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2616 				mutex_exit(&ptgt->tgt_mutex);
2617 				fscsi->scsi_fc_status = xport_retval =
2618 				    fc_ulp_transport(pptr->port_fp_handle,
2619 				    fpkt);
2620 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2621 					ret = EIO;
2622 				}
2623 			} else {
2624 				mutex_exit(&ptgt->tgt_mutex);
2625 				ret = EBUSY;
2626 			}
2627 		}
2628 	}
2629 
2630 	/*
2631 	 * Wait for completion only if fc_ulp_transport was called and it
2632 	 * returned a success. This is the only time callback will happen.
2633 	 * Otherwise, there is no point in waiting
2634 	 */
2635 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2636 		ret = fcp_ipkt_sema_wait(icmd);
2637 	}
2638 
2639 	/*
2640 	 * Copy data to IOCTL data structures
2641 	 */
2642 	rsp = NULL;
2643 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2644 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2645 
2646 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2647 			fcp_log(CE_WARN, pptr->port_dip,
2648 			    "!SCSI command to d_id=0x%x lun=0x%x"
2649 			    " failed, Bad FCP response values:"
2650 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2651 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2652 			    ptgt->tgt_d_id, plun->lun_num,
2653 			    rsp->reserved_0, rsp->reserved_1,
2654 			    rsp->fcp_u.fcp_status.reserved_0,
2655 			    rsp->fcp_u.fcp_status.reserved_1,
2656 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2657 
2658 			ret = EIO;
2659 		}
2660 	}
2661 
2662 	if ((ret == 0) && (rsp != NULL)) {
2663 		/*
2664 		 * Calc response lengths
2665 		 */
2666 		sense_len = 0;
2667 		info_len = 0;
2668 
2669 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2670 			info_len = rsp->fcp_response_len;
2671 		}
2672 
2673 		rsp_info   = (struct fcp_rsp_info *)
2674 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2675 
2676 		/*
2677 		 * Get SCSI status
2678 		 */
2679 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2680 		/*
2681 		 * If a lun was just added or removed and the next command
2682 		 * comes through this interface, we need to capture the check
2683 		 * condition so we can discover the new topology.
2684 		 */
2685 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2686 		    rsp->fcp_u.fcp_status.sense_len_set) {
2687 			sense_len = rsp->fcp_sense_len;
2688 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2689 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2690 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2691 			    (FCP_SENSE_NO_LUN(sense_to))) {
2692 				reconfig_lun = TRUE;
2693 			}
2694 		}
2695 
2696 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2697 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2698 			if (reconfig_lun == FALSE) {
2699 				reconfig_status =
2700 				    fcp_is_reconfig_needed(ptgt, fpkt);
2701 			}
2702 
2703 			if ((reconfig_lun == TRUE) ||
2704 			    (reconfig_status == TRUE)) {
2705 				mutex_enter(&ptgt->tgt_mutex);
2706 				if (ptgt->tgt_tid == NULL) {
2707 					/*
2708 					 * Either we've been notified the
2709 					 * REPORT_LUN data has changed, or
2710 					 * we've determined on our own that
2711 					 * we're out of date.  Kick off
2712 					 * rediscovery.
2713 					 */
2714 					tid = timeout(fcp_reconfigure_luns,
2715 					    (caddr_t)ptgt, drv_usectohz(1));
2716 
2717 					ptgt->tgt_tid = tid;
2718 					ptgt->tgt_state |= FCP_TGT_BUSY;
2719 					ret = EBUSY;
2720 					reconfig_pending = TRUE;
2721 				}
2722 				mutex_exit(&ptgt->tgt_mutex);
2723 			}
2724 		}
2725 
2726 		/*
2727 		 * Calc residuals and buffer lengths
2728 		 */
2729 
2730 		if (ret == 0) {
2731 			buf_len = fscsi->scsi_buflen;
2732 			fscsi->scsi_bufresid	= 0;
2733 			if (rsp->fcp_u.fcp_status.resid_under) {
2734 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2735 					fscsi->scsi_bufresid = rsp->fcp_resid;
2736 				} else {
2737 					cmn_err(CE_WARN, "fcp: bad residue %x "
2738 					    "for txfer len %x", rsp->fcp_resid,
2739 					    fscsi->scsi_buflen);
2740 					fscsi->scsi_bufresid =
2741 					    fscsi->scsi_buflen;
2742 				}
2743 				buf_len -= fscsi->scsi_bufresid;
2744 			}
2745 			if (rsp->fcp_u.fcp_status.resid_over) {
2746 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2747 			}
2748 
2749 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2750 			if (fscsi->scsi_rqlen < sense_len) {
2751 				sense_len = fscsi->scsi_rqlen;
2752 			}
2753 
2754 			fscsi->scsi_fc_rspcode	= 0;
2755 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2756 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2757 			}
2758 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2759 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2760 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2761 
2762 			/*
2763 			 * Copy data and request sense
2764 			 *
2765 			 * Data must be copied by using the FCP_CP_IN macro.
2766 			 * This will ensure the proper byte order since the data
2767 			 * is being copied directly from the memory mapped
2768 			 * device register.
2769 			 *
2770 			 * The response (and request sense) will be in the
2771 			 * correct byte order.	No special copy is necessary.
2772 			 */
2773 
2774 			if (buf_len) {
2775 				FCP_CP_IN(fpkt->pkt_data,
2776 				    fscsi->scsi_bufaddr,
2777 				    fpkt->pkt_data_acc,
2778 				    buf_len);
2779 			}
2780 			bcopy((void *)rsp_sense,
2781 			    (void *)fscsi->scsi_rqbufaddr,
2782 			    sense_len);
2783 		}
2784 	}
2785 
2786 	/*
	 * Clean up transport data structures if icmd was allocated,
	 * so that cleanup happens in the same thread that allocated icmd.
2789 	 */
2790 	if (icmd != NULL) {
2791 		fcp_ipkt_sema_cleanup(icmd);
2792 	}
2793 
2794 	/* restore pm busy/idle status */
2795 	if (port_busy) {
2796 		fc_ulp_idle_port(pptr->port_fp_handle);
2797 	}
2798 
2799 	/*
	 * Clean up the target.  If a reconfig is pending, don't clear the
	 * BUSY flag; it will be cleared when the reconfig is complete.
2802 	 */
2803 	if ((ptgt != NULL) && !reconfig_pending) {
2804 		/*
2805 		 * If target was created,
2806 		 */
2807 		if (target_created) {
2808 			mutex_enter(&ptgt->tgt_mutex);
2809 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2810 			mutex_exit(&ptgt->tgt_mutex);
2811 		} else {
2812 			/*
2813 			 * De-mark target as busy
2814 			 */
2815 			mutex_enter(&ptgt->tgt_mutex);
2816 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2817 			mutex_exit(&ptgt->tgt_mutex);
2818 		}
2819 	}
2820 	return (ret);
2821 }
2822 
2823 
2824 static int
2825 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2826     fc_packet_t	*fpkt)
2827 {
2828 	uchar_t			*lun_string;
2829 	uint16_t		lun_num, i;
2830 	int			num_luns;
2831 	int			actual_luns;
2832 	int			num_masked_luns;
2833 	int			lun_buflen;
2834 	struct fcp_lun	*plun	= NULL;
2835 	struct fcp_reportlun_resp	*report_lun;
2836 	uint8_t			reconfig_needed = FALSE;
2837 	uint8_t			lun_exists = FALSE;
2838 
2839 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2840 
2841 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2842 	    fpkt->pkt_datalen);
2843 
2844 	/* get number of luns (which is supplied as LUNS * 8) */
2845 	num_luns = BE_32(report_lun->num_lun) >> 3;
2846 
2847 	/*
2848 	 * Figure out exactly how many lun strings our response buffer
2849 	 * can hold.
2850 	 */
2851 	lun_buflen = (fpkt->pkt_datalen -
2852 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
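	/*
	 * For example, with a 2048-byte response buffer lun_buflen is
	 * (2048 - 8) / 8 = 255 LUN strings: the REPORT_LUNS data begins
	 * with an 8-byte header (the 4-byte LUN list length plus 4
	 * reserved bytes) followed by one 8-byte LUN string per LUN.
	 */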
2853 
2854 	/*
2855 	 * Is our response buffer full or not? We don't want to
2856 	 * potentially walk beyond the number of luns we have.
2857 	 */
2858 	if (num_luns <= lun_buflen) {
2859 		actual_luns = num_luns;
2860 	} else {
2861 		actual_luns = lun_buflen;
2862 	}
2863 
2864 	mutex_enter(&ptgt->tgt_mutex);
2865 
2866 	/* Scan each lun to see if we have masked it. */
2867 	num_masked_luns = 0;
2868 	if (fcp_lun_blacklist != NULL) {
2869 		for (i = 0; i < actual_luns; i++) {
2870 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2871 			switch (lun_string[0] & 0xC0) {
2872 			case FCP_LUN_ADDRESSING:
2873 			case FCP_PD_ADDRESSING:
2874 			case FCP_VOLUME_ADDRESSING:
2875 				lun_num = ((lun_string[0] & 0x3F) << 8)
2876 				    | lun_string[1];
2877 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2878 				    lun_num) == TRUE) {
2879 					num_masked_luns++;
2880 				}
2881 				break;
2882 			default:
2883 				break;
2884 			}
2885 		}
2886 	}
2887 
2888 	/*
2889 	 * The quick and easy check.  If the number of LUNs reported
2890 	 * doesn't match the number we currently know about, we need
2891 	 * to reconfigure.
2892 	 */
2893 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2894 		mutex_exit(&ptgt->tgt_mutex);
2895 		kmem_free(report_lun, fpkt->pkt_datalen);
2896 		return (TRUE);
2897 	}
2898 
2899 	/*
2900 	 * If the quick and easy check doesn't turn up anything, we walk
2901 	 * the list of luns from the REPORT_LUN response and look for
2902 	 * any luns we don't know about.  If we find one, we know we need
2903 	 * to reconfigure. We will skip LUNs that are masked because of the
2904 	 * blacklist.
2905 	 */
2906 	for (i = 0; i < actual_luns; i++) {
2907 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2908 		lun_exists = FALSE;
2909 		switch (lun_string[0] & 0xC0) {
2910 		case FCP_LUN_ADDRESSING:
2911 		case FCP_PD_ADDRESSING:
2912 		case FCP_VOLUME_ADDRESSING:
2913 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2914 
2915 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2916 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2917 				lun_exists = TRUE;
2918 				break;
2919 			}
2920 
2921 			for (plun = ptgt->tgt_lun; plun;
2922 			    plun = plun->lun_next) {
2923 				if (plun->lun_num == lun_num) {
2924 					lun_exists = TRUE;
2925 					break;
2926 				}
2927 			}
2928 			break;
2929 		default:
2930 			break;
2931 		}
2932 
2933 		if (lun_exists == FALSE) {
2934 			reconfig_needed = TRUE;
2935 			break;
2936 		}
2937 	}
2938 
2939 	mutex_exit(&ptgt->tgt_mutex);
2940 	kmem_free(report_lun, fpkt->pkt_datalen);
2941 
2942 	return (reconfig_needed);
2943 }
2944 
2945 /*
 * This function is called by fcp_handle_page83 and uses the inquiry response
 * data stored in plun->lun_inq to determine whether or not a device is a
 * member of the fcp_symmetric_disk_table. We return 0 if it is in the table,
 * otherwise 1.
2950  */
2951 static int
2952 fcp_symmetric_device_probe(struct fcp_lun *plun)
2953 {
2954 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2955 	char			*devidptr;
2956 	int			i, len;
2957 
2958 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2959 		devidptr = fcp_symmetric_disk_table[i];
2960 		len = (int)strlen(devidptr);
2961 
2962 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2963 			return (0);
2964 		}
2965 	}
2966 	return (1);
2967 }
2968 
2969 
2970 /*
 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
 * It returns the current count of state change callbacks, i.e. the value
 * of port_link_cnt.
2974  *
2975  * INPUT:
2976  *   fcp_ioctl.fp_minor -> The minor # of the fp port
2977  *   fcp_ioctl.listlen	-> 1
2978  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
2979  */
2980 /*ARGSUSED2*/
2981 static int
2982 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
2983 {
2984 	int			ret;
2985 	uint32_t		link_cnt;
2986 	struct fcp_ioctl	fioctl;
2987 	struct fcp_port	*pptr = NULL;
2988 
2989 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
2990 	    &pptr)) != 0) {
2991 		return (ret);
2992 	}
2993 
2994 	ASSERT(pptr != NULL);
2995 
2996 	if (fioctl.listlen != 1) {
2997 		return (EINVAL);
2998 	}
2999 
3000 	mutex_enter(&pptr->port_mutex);
3001 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3002 		mutex_exit(&pptr->port_mutex);
3003 		return (ENXIO);
3004 	}
3005 
3006 	/*
3007 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When fcp initially attaches to the port and there is nothing
	 * hanging off the port, or if there was a repeat offline state change
	 * callback (refer to the fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * differentiate the 2 cases.
3013 	 */
3014 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3015 		mutex_exit(&pptr->port_mutex);
3016 		return (ENXIO);
3017 	}
3018 
3019 	link_cnt = pptr->port_link_cnt;
3020 	mutex_exit(&pptr->port_mutex);
3021 
3022 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3023 		return (EFAULT);
3024 	}
3025 
3026 #ifdef	_MULTI_DATAMODEL
3027 	switch (ddi_model_convert_from(mode & FMODELS)) {
3028 	case DDI_MODEL_ILP32: {
3029 		struct fcp32_ioctl f32_ioctl;
3030 
3031 		f32_ioctl.fp_minor = fioctl.fp_minor;
3032 		f32_ioctl.listlen = fioctl.listlen;
3033 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3034 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3035 		    sizeof (struct fcp32_ioctl), mode)) {
3036 			return (EFAULT);
3037 		}
3038 		break;
3039 	}
3040 	case DDI_MODEL_NONE:
3041 		if (ddi_copyout((void *)&fioctl, (void *)data,
3042 		    sizeof (struct fcp_ioctl), mode)) {
3043 			return (EFAULT);
3044 		}
3045 		break;
3046 	}
3047 #else	/* _MULTI_DATAMODEL */
3048 
3049 	if (ddi_copyout((void *)&fioctl, (void *)data,
3050 	    sizeof (struct fcp_ioctl), mode)) {
3051 		return (EFAULT);
3052 	}
3053 #endif	/* _MULTI_DATAMODEL */
3054 
3055 	return (0);
3056 }
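
/*
 * Illustrative sketch (not part of the driver): a user-land caller of the
 * FCP_STATE_COUNT ioctl handled above.  The device node and file descriptor
 * setup are assumptions; the fcp_ioctl usage (listlen of 1, list pointing
 * at a 32-bit counter) matches the handler above.
 *
 *	uint32_t		state_cnt;
 *	struct fcp_ioctl	fioctl;
 *
 *	fioctl.fp_minor = fp_minor;
 *	fioctl.listlen	= 1;
 *	fioctl.list	= (caddr_t)&state_cnt;
 *
 *	if (ioctl(fd, FCP_STATE_COUNT, &fioctl) == 0)
 *		(void) printf("state change count: %u\n", state_cnt);
 */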
3057 
3058 /*
3059  * This function copies the fcp_ioctl structure passed in from user land
3060  * into kernel land. Handles 32 bit applications.
3061  */
3062 /*ARGSUSED*/
3063 static int
3064 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3065     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3066 {
3067 	struct fcp_port	*t_pptr;
3068 
3069 #ifdef	_MULTI_DATAMODEL
3070 	switch (ddi_model_convert_from(mode & FMODELS)) {
3071 	case DDI_MODEL_ILP32: {
3072 		struct fcp32_ioctl f32_ioctl;
3073 
3074 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3075 		    sizeof (struct fcp32_ioctl), mode)) {
3076 			return (EFAULT);
3077 		}
3078 		fioctl->fp_minor = f32_ioctl.fp_minor;
3079 		fioctl->listlen = f32_ioctl.listlen;
3080 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3081 		break;
3082 	}
3083 	case DDI_MODEL_NONE:
3084 		if (ddi_copyin((void *)data, (void *)fioctl,
3085 		    sizeof (struct fcp_ioctl), mode)) {
3086 			return (EFAULT);
3087 		}
3088 		break;
3089 	}
3090 
3091 #else	/* _MULTI_DATAMODEL */
3092 	if (ddi_copyin((void *)data, (void *)fioctl,
3093 	    sizeof (struct fcp_ioctl), mode)) {
3094 		return (EFAULT);
3095 	}
3096 #endif	/* _MULTI_DATAMODEL */
3097 
3098 	/*
3099 	 * Right now we can assume that the minor number matches with
3100 	 * this instance of fp. If this changes we will need to
3101 	 * revisit this logic.
3102 	 */
3103 	mutex_enter(&fcp_global_mutex);
3104 	t_pptr = fcp_port_head;
3105 	while (t_pptr) {
3106 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3107 			break;
3108 		} else {
3109 			t_pptr = t_pptr->port_next;
3110 		}
3111 	}
3112 	*pptr = t_pptr;
3113 	mutex_exit(&fcp_global_mutex);
3114 	if (t_pptr == NULL) {
3115 		return (ENXIO);
3116 	}
3117 
3118 	return (0);
3119 }
3120 
3121 /*
3122  *     Function: fcp_port_create_tgt
3123  *
 *  Description: As the name suggests, this function creates the target
 *		 context specified by the WWN provided by the caller.  If the
 *		 creation goes well and the target is known by fp/fctl, a
 *		 PLOGI followed by a PRLI is issued.
3128  *
3129  *     Argument: pptr		fcp port structure
3130  *		 pwwn		WWN of the target
3131  *		 ret_val	Address of the return code.  It could be:
3132  *				EIO, ENOMEM or 0.
3133  *		 fc_status	PLOGI or PRLI status completion
3134  *		 fc_pkt_state	PLOGI or PRLI state completion
3135  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3136  *		 fc_pkt_action	PLOGI or PRLI action completion
3137  *
3138  * Return Value: NULL if it failed
3139  *		 Target structure address if it succeeds
3140  */
3141 static struct fcp_tgt *
3142 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3143     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3144 {
3145 	struct fcp_tgt	*ptgt = NULL;
3146 	fc_portmap_t		devlist;
3147 	int			lcount;
3148 	int			error;
3149 
3150 	*ret_val = 0;
3151 
3152 	/*
3153 	 * Check FC port device & get port map
3154 	 */
3155 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3156 	    &error, 1) == NULL) {
3157 		*ret_val = EIO;
3158 	} else {
3159 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3160 		    &devlist) != FC_SUCCESS) {
3161 			*ret_val = EIO;
3162 		}
3163 	}
3164 
3165 	/* Set port map flags */
3166 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3167 
3168 	/* Allocate target */
3169 	if (*ret_val == 0) {
3170 		lcount = pptr->port_link_cnt;
3171 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3172 		if (ptgt == NULL) {
3173 			fcp_log(CE_WARN, pptr->port_dip,
3174 			    "!FC target allocation failed");
3175 			*ret_val = ENOMEM;
3176 		} else {
3177 			/* Setup target */
3178 			mutex_enter(&ptgt->tgt_mutex);
3179 
3180 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3181 			ptgt->tgt_tmp_cnt	= 1;
3182 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3183 			ptgt->tgt_hard_addr	=
3184 			    devlist.map_hard_addr.hard_addr;
3185 			ptgt->tgt_pd_handle	= devlist.map_pd;
3186 			ptgt->tgt_fca_dev	= NULL;
3187 
3188 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3189 			    FC_WWN_SIZE);
3190 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3191 			    FC_WWN_SIZE);
3192 
3193 			mutex_exit(&ptgt->tgt_mutex);
3194 		}
3195 	}
3196 
3197 	/* Release global mutex for PLOGI and PRLI */
3198 	mutex_exit(&fcp_global_mutex);
3199 
3200 	/* Send PLOGI (If necessary) */
3201 	if (*ret_val == 0) {
3202 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3203 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3204 	}
3205 
3206 	/* Send PRLI (If necessary) */
3207 	if (*ret_val == 0) {
3208 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3209 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3210 	}
3211 
3212 	mutex_enter(&fcp_global_mutex);
3213 
3214 	return (ptgt);
3215 }
3216 
3217 /*
3218  *     Function: fcp_tgt_send_plogi
3219  *
3220  *  Description: This function sends a PLOGI to the target specified by the
3221  *		 caller and waits till it completes.
3222  *
3223  *     Argument: ptgt		Target to send the plogi to.
3224  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3225  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3226  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3227  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3228  *
3229  * Return Value: 0
3230  *		 ENOMEM
3231  *		 EIO
3232  *
3233  *	Context: User context.
3234  */
3235 static int
3236 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3237     int *fc_pkt_reason, int *fc_pkt_action)
3238 {
3239 	struct fcp_port	*pptr;
3240 	struct fcp_ipkt	*icmd;
3241 	struct fc_packet	*fpkt;
3242 	fc_frame_hdr_t		*hp;
3243 	struct la_els_logi	logi;
3244 	int			tcount;
3245 	int			lcount;
3246 	int			ret, login_retval = ~FC_SUCCESS;
3247 
3248 	ret = 0;
3249 
3250 	pptr = ptgt->tgt_port;
3251 
3252 	lcount = pptr->port_link_cnt;
3253 	tcount = ptgt->tgt_change_cnt;
3254 
3255 	/* Alloc internal packet */
3256 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3257 	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
3258 	    FC_INVALID_RSCN_COUNT);
3259 
3260 	if (icmd == NULL) {
3261 		ret = ENOMEM;
3262 	} else {
3263 		/*
3264 		 * Setup internal packet as sema sync
3265 		 */
3266 		fcp_ipkt_sema_init(icmd);
3267 
3268 		/*
3269 		 * Setup internal packet (icmd)
3270 		 */
3271 		icmd->ipkt_lun		= NULL;
3272 		icmd->ipkt_restart	= 0;
3273 		icmd->ipkt_retries	= 0;
3274 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3275 
3276 		/*
3277 		 * Setup fc_packet
3278 		 */
3279 		fpkt = icmd->ipkt_fpkt;
3280 
3281 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3282 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3283 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3284 
3285 		/*
3286 		 * Setup FC frame header
3287 		 */
3288 		hp = &fpkt->pkt_cmd_fhdr;
3289 
3290 		hp->s_id	= pptr->port_id;	/* source ID */
3291 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3292 		hp->r_ctl	= R_CTL_ELS_REQ;
3293 		hp->type	= FC_TYPE_EXTENDED_LS;
3294 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3295 		hp->seq_id	= 0;
3296 		hp->rsvd	= 0;
3297 		hp->df_ctl	= 0;
3298 		hp->seq_cnt	= 0;
3299 		hp->ox_id	= 0xffff;		/* i.e. none */
3300 		hp->rx_id	= 0xffff;		/* i.e. none */
3301 		hp->ro		= 0;
3302 
3303 		/*
3304 		 * Setup PLOGI
3305 		 */
3306 		bzero(&logi, sizeof (struct la_els_logi));
3307 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3308 
3309 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3310 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3311 
3312 		/*
3313 		 * Send PLOGI
3314 		 */
3315 		*fc_status = login_retval =
3316 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3317 		if (*fc_status != FC_SUCCESS) {
3318 			ret = EIO;
3319 		}
3320 	}
3321 
3322 	/*
3323 	 * Wait for completion
3324 	 */
3325 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3326 		ret = fcp_ipkt_sema_wait(icmd);
3327 
3328 		*fc_pkt_state	= fpkt->pkt_state;
3329 		*fc_pkt_reason	= fpkt->pkt_reason;
3330 		*fc_pkt_action	= fpkt->pkt_action;
3331 	}
3332 
3333 	/*
	 * Clean up transport data structures if icmd was allocated.  The
	 * semaphore callback only wakes up this thread, so the cleanup
	 * always happens here, in the thread that allocated icmd.
3337 	 */
3338 	if (icmd != NULL) {
3339 		fcp_ipkt_sema_cleanup(icmd);
3340 	}
3341 
3342 	return (ret);
3343 }
3344 
3345 /*
3346  *     Function: fcp_tgt_send_prli
3347  *
3348  *  Description: Does nothing as of today.
3349  *
3350  *     Argument: ptgt		Target to send the prli to.
3351  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3352  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3353  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3354  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3355  *
3356  * Return Value: 0
3357  */
3358 /*ARGSUSED*/
3359 static int
3360 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3361     int *fc_pkt_reason, int *fc_pkt_action)
3362 {
3363 	return (0);
3364 }
3365 
3366 /*
3367  *     Function: fcp_ipkt_sema_init
3368  *
3369  *  Description: Initializes the semaphore contained in the internal packet.
3370  *
3371  *     Argument: icmd	Internal packet the semaphore of which must be
3372  *			initialized.
3373  *
3374  * Return Value: None
3375  *
3376  *	Context: User context only.
3377  */
3378 static void
3379 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3380 {
3381 	struct fc_packet	*fpkt;
3382 
3383 	fpkt = icmd->ipkt_fpkt;
3384 
3385 	/* Create semaphore for sync */
3386 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3387 
3388 	/* Setup the completion callback */
3389 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3390 }
3391 
3392 /*
3393  *     Function: fcp_ipkt_sema_wait
3394  *
3395  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3396  *		 semaphore is released in the callback.
3397  *
3398  *     Argument: icmd	Internal packet to wait on for completion.
3399  *
3400  * Return Value: 0
3401  *		 EIO
3402  *		 EBUSY
3403  *		 EAGAIN
3404  *
3405  *	Context: User context only.
3406  *
3407  * This function does a conversion between the field pkt_state of the fc_packet
3408  * embedded in the internal packet (icmd) and the code it returns.
3409  */
3410 static int
3411 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3412 {
3413 	struct fc_packet	*fpkt;
3414 	int	ret;
3415 
3416 	ret = EIO;
3417 	fpkt = icmd->ipkt_fpkt;
3418 
3419 	/*
3420 	 * Wait on semaphore
3421 	 */
3422 	sema_p(&(icmd->ipkt_sema));
3423 
3424 	/*
3425 	 * Check the status of the FC packet
3426 	 */
3427 	switch (fpkt->pkt_state) {
3428 	case FC_PKT_SUCCESS:
3429 		ret = 0;
3430 		break;
3431 	case FC_PKT_LOCAL_RJT:
3432 		switch (fpkt->pkt_reason) {
3433 		case FC_REASON_SEQ_TIMEOUT:
3434 		case FC_REASON_RX_BUF_TIMEOUT:
3435 			ret = EAGAIN;
3436 			break;
3437 		case FC_REASON_PKT_BUSY:
3438 			ret = EBUSY;
3439 			break;
3440 		}
3441 		break;
3442 	case FC_PKT_TIMEOUT:
3443 		ret = EAGAIN;
3444 		break;
3445 	case FC_PKT_LOCAL_BSY:
3446 	case FC_PKT_TRAN_BSY:
3447 	case FC_PKT_NPORT_BSY:
3448 	case FC_PKT_FABRIC_BSY:
3449 		ret = EBUSY;
3450 		break;
3451 	case FC_PKT_LS_RJT:
3452 	case FC_PKT_BA_RJT:
3453 		switch (fpkt->pkt_reason) {
3454 		case FC_REASON_LOGICAL_BSY:
3455 			ret = EBUSY;
3456 			break;
3457 		}
3458 		break;
3459 	case FC_PKT_FS_RJT:
3460 		switch (fpkt->pkt_reason) {
3461 		case FC_REASON_FS_LOGICAL_BUSY:
3462 			ret = EBUSY;
3463 			break;
3464 		}
3465 		break;
3466 	}
3467 
3468 	return (ret);
3469 }
3470 
3471 /*
3472  *     Function: fcp_ipkt_sema_callback
3473  *
 *  Description: Registered as the completion callback function for the FC
 *		 transport when the ipkt semaphore is used for sync.  It wakes
 *		 up the waiting user thread, which then completes the
 *		 transaction and cleans up the data structures.
3478  *
3479  *     Argument: fpkt	FC packet (points to the icmd)
3480  *
3481  * Return Value: None
3482  *
3483  *	Context: User context only
3484  */
3485 static void
3486 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3487 {
3488 	struct fcp_ipkt	*icmd;
3489 
3490 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3491 
3492 	/*
3493 	 * Wake up user thread
3494 	 */
3495 	sema_v(&(icmd->ipkt_sema));
3496 }
3497 
3498 /*
3499  *     Function: fcp_ipkt_sema_cleanup
3500  *
3501  *  Description: Called to cleanup (if necessary) the data structures used
3502  *		 when ipkt sema is used for sync.  This function will detect
3503  *		 whether the caller is the last thread (via counter) and
3504  *		 cleanup only if necessary.
3505  *
3506  *     Argument: icmd	Internal command packet
3507  *
3508  * Return Value: None
3509  *
3510  *	Context: User context only
3511  */
3512 static void
3513 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3514 {
3515 	struct fcp_tgt	*ptgt;
3516 	struct fcp_port	*pptr;
3517 
3518 	ptgt = icmd->ipkt_tgt;
3519 	pptr = icmd->ipkt_port;
3520 
3521 	/*
3522 	 * Acquire data structure
3523 	 */
3524 	mutex_enter(&ptgt->tgt_mutex);
3525 
3526 	/*
3527 	 * Destroy semaphore
3528 	 */
3529 	sema_destroy(&(icmd->ipkt_sema));
3530 
3531 	/*
3532 	 * Cleanup internal packet
3533 	 */
3534 	mutex_exit(&ptgt->tgt_mutex);
3535 	fcp_icmd_free(pptr, icmd);
3536 }
3537 
3538 /*
3539  *     Function: fcp_port_attach
3540  *
 *  Description: Called by the transport framework to attach a new port or
 *		 to resume or power up a previously suspended one.
3543  *
3544  *     Argument: ulph		Port handle
3545  *		 *pinfo		Port information
3546  *		 cmd		Command
3547  *		 s_id		Port ID
3548  *
3549  * Return Value: FC_FAILURE or FC_SUCCESS
3550  */
3551 /*ARGSUSED*/
3552 static int
3553 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3554     fc_attach_cmd_t cmd, uint32_t s_id)
3555 {
3556 	int	instance;
3557 	int	res = FC_FAILURE; /* default result */
3558 
3559 	ASSERT(pinfo != NULL);
3560 
3561 	instance = ddi_get_instance(pinfo->port_dip);
3562 
3563 	switch (cmd) {
3564 	case FC_CMD_ATTACH:
3565 		/*
3566 		 * this port instance attaching for the first time (or after
3567 		 * being detached before)
3568 		 */
3569 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3570 		    instance) == DDI_SUCCESS) {
3571 			res = FC_SUCCESS;
3572 		} else {
3573 			ASSERT(ddi_get_soft_state(fcp_softstate,
3574 			    instance) == NULL);
3575 		}
3576 		break;
3577 
3578 	case FC_CMD_RESUME:
3579 	case FC_CMD_POWER_UP:
3580 		/*
3581 		 * this port instance was attached and the suspended and
3582 		 * will now be resumed
3583 		 */
3584 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3585 		    instance) == DDI_SUCCESS) {
3586 			res = FC_SUCCESS;
3587 		}
3588 		break;
3589 
3590 	default:
3591 		/* shouldn't happen */
3592 		FCP_TRACE(fcp_logq, "fcp",
3593 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3594 		    "port_attach: unknown cmdcommand: %d", cmd);
3595 		break;
3596 	}
3597 
3598 	/* return result */
3599 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3600 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3601 
3602 	return (res);
3603 }
3604 
3605 
3606 /*
3607  * detach or suspend this port instance
3608  *
3609  * acquires and releases the global mutex
3610  *
3611  * acquires and releases the mutex for this port
3612  *
3613  * acquires and releases the hotplug mutex for this port
3614  */
3615 /*ARGSUSED*/
3616 static int
3617 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3618     fc_detach_cmd_t cmd)
3619 {
3620 	int			flag;
3621 	int			instance;
3622 	struct fcp_port		*pptr;
3623 
3624 	instance = ddi_get_instance(info->port_dip);
3625 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3626 
3627 	switch (cmd) {
3628 	case FC_CMD_SUSPEND:
3629 		FCP_DTRACE(fcp_logq, "fcp",
3630 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3631 		    "port suspend called for port %d", instance);
3632 		flag = FCP_STATE_SUSPENDED;
3633 		break;
3634 
3635 	case FC_CMD_POWER_DOWN:
3636 		FCP_DTRACE(fcp_logq, "fcp",
3637 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3638 		    "port power down called for port %d", instance);
3639 		flag = FCP_STATE_POWER_DOWN;
3640 		break;
3641 
3642 	case FC_CMD_DETACH:
3643 		FCP_DTRACE(fcp_logq, "fcp",
3644 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3645 		    "port detach called for port %d", instance);
3646 		flag = FCP_STATE_DETACHING;
3647 		break;
3648 
3649 	default:
3650 		/* shouldn't happen */
3651 		return (FC_FAILURE);
3652 	}
3653 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3654 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3655 
3656 	return (fcp_handle_port_detach(pptr, flag, instance));
3657 }
3658 
3659 
3660 /*
 * called for ioctls on the transport's devctl interface that the transport
 * has passed on to us
 *
 * this will only be called for device control ioctls (i.e. hotplug operations)
3665  *
3666  * return FC_SUCCESS if we decide to claim the ioctl,
3667  * else return FC_UNCLAIMED
3668  *
3669  * *rval is set iff we decide to claim the ioctl
3670  */
3671 /*ARGSUSED*/
3672 static int
3673 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3674     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3675 {
3676 	int			retval = FC_UNCLAIMED;	/* return value */
3677 	struct fcp_port		*pptr = NULL;		/* our soft state */
3678 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3679 	dev_info_t		*cdip;
3680 	mdi_pathinfo_t		*pip = NULL;
3681 	char			*ndi_nm;		/* NDI name */
3682 	char			*ndi_addr;		/* NDI addr */
3683 	int			is_mpxio, circ;
3684 	int			devi_entered = 0;
3685 	time_t			end_time;
3686 
3687 	ASSERT(rval != NULL);
3688 
3689 	FCP_DTRACE(fcp_logq, "fcp",
3690 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3691 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3692 
3693 	/* if already claimed then forget it */
3694 	if (claimed) {
3695 		/*
3696 		 * for now, if this ioctl has already been claimed, then
3697 		 * we just ignore it
3698 		 */
3699 		return (retval);
3700 	}
3701 
3702 	/* get our port info */
3703 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3704 		fcp_log(CE_WARN, NULL,
3705 		    "!fcp:Invalid port handle handle in ioctl");
3706 		*rval = ENXIO;
3707 		return (retval);
3708 	}
3709 	is_mpxio = pptr->port_mpxio;
3710 
3711 	switch (cmd) {
3712 	case DEVCTL_BUS_GETSTATE:
3713 	case DEVCTL_BUS_QUIESCE:
3714 	case DEVCTL_BUS_UNQUIESCE:
3715 	case DEVCTL_BUS_RESET:
3716 	case DEVCTL_BUS_RESETALL:
3717 
3718 	case DEVCTL_BUS_DEV_CREATE:
3719 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3720 			return (retval);
3721 		}
3722 		break;
3723 
3724 	case DEVCTL_DEVICE_GETSTATE:
3725 	case DEVCTL_DEVICE_OFFLINE:
3726 	case DEVCTL_DEVICE_ONLINE:
3727 	case DEVCTL_DEVICE_REMOVE:
3728 	case DEVCTL_DEVICE_RESET:
3729 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3730 			return (retval);
3731 		}
3732 
3733 		ASSERT(dcp != NULL);
3734 
3735 		/* ensure we have a name and address */
3736 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3737 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3738 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3739 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3740 			    "ioctl: can't get name (%s) or addr (%s)",
3741 			    ndi_nm ? ndi_nm : "<null ptr>",
3742 			    ndi_addr ? ndi_addr : "<null ptr>");
3743 			ndi_dc_freehdl(dcp);
3744 			return (retval);
3745 		}
3746 
3747 
3748 		/* get our child's DIP */
3749 		ASSERT(pptr != NULL);
3750 		if (is_mpxio) {
3751 			mdi_devi_enter(pptr->port_dip, &circ);
3752 		} else {
3753 			ndi_devi_enter(pptr->port_dip, &circ);
3754 		}
3755 		devi_entered = 1;
3756 
3757 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3758 		    ndi_addr)) == NULL) {
3759 			/* Look for virtually enumerated devices. */
3760 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3761 			if (pip == NULL ||
3762 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3763 				*rval = ENXIO;
3764 				goto out;
3765 			}
3766 		}
3767 		break;
3768 
3769 	default:
3770 		*rval = ENOTTY;
3771 		return (retval);
3772 	}
3773 
3774 	/* this ioctl is ours -- process it */
3775 
3776 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3777 
3778 	/* we assume it will be a success; else we'll set error value */
3779 	*rval = 0;
3780 
3781 
3782 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3783 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3784 	    "ioctl: claiming this one");
3785 
3786 	/* handle ioctls now */
3787 	switch (cmd) {
3788 	case DEVCTL_DEVICE_GETSTATE:
3789 		ASSERT(cdip != NULL);
3790 		ASSERT(dcp != NULL);
3791 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3792 			*rval = EFAULT;
3793 		}
3794 		break;
3795 
3796 	case DEVCTL_DEVICE_REMOVE:
3797 	case DEVCTL_DEVICE_OFFLINE: {
3798 		int			flag = 0;
3799 		int			lcount;
3800 		int			tcount;
3801 		struct fcp_pkt	*head = NULL;
3802 		struct fcp_lun	*plun;
3803 		child_info_t		*cip = CIP(cdip);
3804 		int			all = 1;
3805 		struct fcp_lun	*tplun;
3806 		struct fcp_tgt	*ptgt;
3807 
3808 		ASSERT(pptr != NULL);
3809 		ASSERT(cdip != NULL);
3810 
3811 		mutex_enter(&pptr->port_mutex);
3812 		if (pip != NULL) {
3813 			cip = CIP(pip);
3814 		}
3815 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3816 			mutex_exit(&pptr->port_mutex);
3817 			*rval = ENXIO;
3818 			break;
3819 		}
3820 
3821 		head = fcp_scan_commands(plun);
3822 		if (head != NULL) {
3823 			fcp_abort_commands(head, LUN_PORT);
3824 		}
3825 		lcount = pptr->port_link_cnt;
3826 		tcount = plun->lun_tgt->tgt_change_cnt;
3827 		mutex_exit(&pptr->port_mutex);
3828 
3829 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3830 			flag = NDI_DEVI_REMOVE;
3831 		}
3832 
3833 		if (is_mpxio) {
3834 			mdi_devi_exit(pptr->port_dip, circ);
3835 		} else {
3836 			ndi_devi_exit(pptr->port_dip, circ);
3837 		}
3838 		devi_entered = 0;
3839 
3840 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3841 		    FCP_OFFLINE, lcount, tcount, flag);
3842 
3843 		if (*rval != NDI_SUCCESS) {
3844 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3845 			break;
3846 		}
3847 
3848 		fcp_update_offline_flags(plun);
3849 
3850 		ptgt = plun->lun_tgt;
3851 		mutex_enter(&ptgt->tgt_mutex);
3852 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3853 		    tplun->lun_next) {
3854 			mutex_enter(&tplun->lun_mutex);
3855 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3856 				all = 0;
3857 			}
3858 			mutex_exit(&tplun->lun_mutex);
3859 		}
3860 
3861 		if (all) {
3862 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3863 			/*
3864 			 * The user is unconfiguring/offlining the device.
			 * If this is a fabric topology and auto configuration
			 * is enabled, make sure the user is the only one who
			 * can reconfigure the device.
3868 			 */
3869 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3870 			    fcp_enable_auto_configuration) {
3871 				ptgt->tgt_manual_config_only = 1;
3872 			}
3873 		}
3874 		mutex_exit(&ptgt->tgt_mutex);
3875 		break;
3876 	}
3877 
3878 	case DEVCTL_DEVICE_ONLINE: {
3879 		int			lcount;
3880 		int			tcount;
3881 		struct fcp_lun	*plun;
3882 		child_info_t		*cip = CIP(cdip);
3883 
3884 		ASSERT(cdip != NULL);
3885 		ASSERT(pptr != NULL);
3886 
3887 		mutex_enter(&pptr->port_mutex);
3888 		if (pip != NULL) {
3889 			cip = CIP(pip);
3890 		}
3891 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3892 			mutex_exit(&pptr->port_mutex);
3893 			*rval = ENXIO;
3894 			break;
3895 		}
3896 		lcount = pptr->port_link_cnt;
3897 		tcount = plun->lun_tgt->tgt_change_cnt;
3898 		mutex_exit(&pptr->port_mutex);
3899 
3900 		/*
3901 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3902 		 * to allow the device attach to occur when the device is
		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
		 * issued by scsi_probe()).
3905 		 */
3906 		mutex_enter(&LUN_TGT->tgt_mutex);
3907 		plun->lun_state |= FCP_LUN_ONLINING;
3908 		mutex_exit(&LUN_TGT->tgt_mutex);
3909 
3910 		if (is_mpxio) {
3911 			mdi_devi_exit(pptr->port_dip, circ);
3912 		} else {
3913 			ndi_devi_exit(pptr->port_dip, circ);
3914 		}
3915 		devi_entered = 0;
3916 
3917 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3918 		    FCP_ONLINE, lcount, tcount, 0);
3919 
3920 		if (*rval != NDI_SUCCESS) {
3921 			/* Reset the FCP_LUN_ONLINING bit */
3922 			mutex_enter(&LUN_TGT->tgt_mutex);
3923 			plun->lun_state &= ~FCP_LUN_ONLINING;
3924 			mutex_exit(&LUN_TGT->tgt_mutex);
3925 			*rval = EIO;
3926 			break;
3927 		}
3928 		mutex_enter(&LUN_TGT->tgt_mutex);
3929 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3930 		    FCP_LUN_ONLINING);
3931 		mutex_exit(&LUN_TGT->tgt_mutex);
3932 		break;
3933 	}
3934 
3935 	case DEVCTL_BUS_DEV_CREATE: {
3936 		uchar_t			*bytes = NULL;
3937 		uint_t			nbytes;
3938 		struct fcp_tgt		*ptgt = NULL;
3939 		struct fcp_lun		*plun = NULL;
3940 		dev_info_t		*useless_dip = NULL;
3941 
3942 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3943 		    DEVCTL_CONSTRUCT, &useless_dip);
3944 		if (*rval != 0 || useless_dip == NULL) {
3945 			break;
3946 		}
3947 
3948 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3949 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3950 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3951 			*rval = EINVAL;
3952 			(void) ndi_devi_free(useless_dip);
3953 			if (bytes != NULL) {
3954 				ddi_prop_free(bytes);
3955 			}
3956 			break;
3957 		}
3958 
3959 		*rval = fcp_create_on_demand(pptr, bytes);
3960 		if (*rval == 0) {
3961 			mutex_enter(&pptr->port_mutex);
3962 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3963 			if (ptgt) {
3964 				/*
3965 				 * We now have a pointer to the target that
				 * was created.  Let's point to the first LUN on
3967 				 * this new target.
3968 				 */
3969 				mutex_enter(&ptgt->tgt_mutex);
3970 
3971 				plun = ptgt->tgt_lun;
3972 				/*
3973 				 * There may be stale/offline LUN entries on
3974 				 * this list (this is by design) and so we have
3975 				 * to make sure we point to the first online
3976 				 * LUN
3977 				 */
3978 				while (plun &&
3979 				    plun->lun_state & FCP_LUN_OFFLINE) {
3980 					plun = plun->lun_next;
3981 				}
3982 
3983 				mutex_exit(&ptgt->tgt_mutex);
3984 			}
3985 			mutex_exit(&pptr->port_mutex);
3986 		}
3987 
3988 		if (*rval == 0 && ptgt && plun) {
3989 			mutex_enter(&plun->lun_mutex);
3990 			/*
3991 			 * Allow up to fcp_lun_ready_retry seconds to
3992 			 * configure all the luns behind the target.
3993 			 *
3994 			 * The intent here is to allow targets with long
3995 			 * reboot/reset-recovery times to become available
3996 			 * while limiting the maximum wait time for an
3997 			 * unresponsive target.
3998 			 */
3999 			end_time = ddi_get_lbolt() +
4000 			    SEC_TO_TICK(fcp_lun_ready_retry);
4001 
4002 			while (ddi_get_lbolt() < end_time) {
4003 				retval = FC_SUCCESS;
4004 
4005 				/*
				 * The new NDI interfaces for on-demand creation
				 * are inflexible, so do some more work to pass
				 * on a path name of some LUN (the design is
				 * broken!).
4009 				 */
4010 				if (plun->lun_cip) {
4011 					if (plun->lun_mpxio == 0) {
4012 						cdip = DIP(plun->lun_cip);
4013 					} else {
4014 						cdip = mdi_pi_get_client(
4015 						    PIP(plun->lun_cip));
4016 					}
4017 					if (cdip == NULL) {
4018 						*rval = ENXIO;
4019 						break;
4020 					}
4021 
4022 					if (!i_ddi_devi_attached(cdip)) {
4023 						mutex_exit(&plun->lun_mutex);
4024 						delay(drv_usectohz(1000000));
4025 						mutex_enter(&plun->lun_mutex);
4026 					} else {
4027 						/*
						 * This LUN is ready; let's
						 * check the next one.
4030 						 */
4031 						mutex_exit(&plun->lun_mutex);
4032 						plun = plun->lun_next;
4033 						while (plun && (plun->lun_state
4034 						    & FCP_LUN_OFFLINE)) {
4035 							plun = plun->lun_next;
4036 						}
4037 						if (!plun) {
4038 							break;
4039 						}
4040 						mutex_enter(&plun->lun_mutex);
4041 					}
4042 				} else {
4043 					/*
					 * The lun_cip field of a valid LUN
					 * should never be NULL.  Fail the
					 * command.
4047 					 */
4048 					*rval = ENXIO;
4049 					break;
4050 				}
4051 			}
4052 			if (plun) {
4053 				mutex_exit(&plun->lun_mutex);
4054 			} else {
4055 				char devnm[MAXNAMELEN];
4056 				int nmlen;
4057 
4058 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4059 				    ddi_node_name(cdip),
4060 				    ddi_get_name_addr(cdip));
4061 
4062 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4063 				    0) {
4064 					*rval = EFAULT;
4065 				}
4066 			}
4067 		} else {
4068 			int	i;
4069 			char	buf[25];
4070 
4071 			for (i = 0; i < FC_WWN_SIZE; i++) {
4072 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4073 			}
4074 
4075 			fcp_log(CE_WARN, pptr->port_dip,
4076 			    "!Failed to create nodes for pwwn=%s; error=%x",
4077 			    buf, *rval);
4078 		}
4079 
4080 		(void) ndi_devi_free(useless_dip);
4081 		ddi_prop_free(bytes);
4082 		break;
4083 	}
4084 
4085 	case DEVCTL_DEVICE_RESET: {
4086 		struct fcp_lun		*plun;
4087 		child_info_t		*cip = CIP(cdip);
4088 
4089 		ASSERT(cdip != NULL);
4090 		ASSERT(pptr != NULL);
4091 		mutex_enter(&pptr->port_mutex);
4092 		if (pip != NULL) {
4093 			cip = CIP(pip);
4094 		}
4095 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4096 			mutex_exit(&pptr->port_mutex);
4097 			*rval = ENXIO;
4098 			break;
4099 		}
4100 		mutex_exit(&pptr->port_mutex);
4101 
4102 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4103 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4104 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4105 
4106 			*rval = ENXIO;
4107 			break;
4108 		}
4109 
4110 		if (plun->lun_sd == NULL) {
4111 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4112 
4113 			*rval = ENXIO;
4114 			break;
4115 		}
4116 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4117 
4118 		/*
		 * pass the LUN's scsi_address so that fcp_scsi_reset()
		 * can figure out which target to reset
4121 		 */
4122 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4123 		    RESET_TARGET) == FALSE) {
4124 			*rval = EIO;
4125 		}
4126 		break;
4127 	}
4128 
4129 	case DEVCTL_BUS_GETSTATE:
4130 		ASSERT(dcp != NULL);
4131 		ASSERT(pptr != NULL);
4132 		ASSERT(pptr->port_dip != NULL);
4133 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4134 		    NDI_SUCCESS) {
4135 			*rval = EFAULT;
4136 		}
4137 		break;
4138 
4139 	case DEVCTL_BUS_QUIESCE:
4140 	case DEVCTL_BUS_UNQUIESCE:
4141 		*rval = ENOTSUP;
4142 		break;
4143 
4144 	case DEVCTL_BUS_RESET:
4145 	case DEVCTL_BUS_RESETALL:
4146 		ASSERT(pptr != NULL);
4147 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4148 		break;
4149 
4150 	default:
4151 		ASSERT(dcp != NULL);
4152 		*rval = ENOTTY;
4153 		break;
4154 	}
4155 
4156 	/* all done -- clean up and return */
4157 out:	if (devi_entered) {
4158 		if (is_mpxio) {
4159 			mdi_devi_exit(pptr->port_dip, circ);
4160 		} else {
4161 			ndi_devi_exit(pptr->port_dip, circ);
4162 		}
4163 	}
4164 
4165 	if (dcp != NULL) {
4166 		ndi_dc_freehdl(dcp);
4167 	}
4168 
4169 	return (retval);
4170 }
4171 
4172 
4173 /*ARGSUSED*/
4174 static int
4175 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4176     uint32_t claimed)
4177 {
4178 	uchar_t			r_ctl;
4179 	uchar_t			ls_code;
4180 	struct fcp_port	*pptr;
4181 
4182 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4183 		return (FC_UNCLAIMED);
4184 	}
4185 
4186 	mutex_enter(&pptr->port_mutex);
4187 	if (pptr->port_state & (FCP_STATE_DETACHING |
4188 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4189 		mutex_exit(&pptr->port_mutex);
4190 		return (FC_UNCLAIMED);
4191 	}
4192 	mutex_exit(&pptr->port_mutex);
4193 
4194 	r_ctl = buf->ub_frame.r_ctl;
4195 
4196 	switch (r_ctl & R_CTL_ROUTING) {
4197 	case R_CTL_EXTENDED_SVC:
4198 		if (r_ctl == R_CTL_ELS_REQ) {
4199 			ls_code = buf->ub_buffer[0];
4200 
4201 			switch (ls_code) {
4202 			case LA_ELS_PRLI:
4203 				/*
4204 				 * We really don't care if something fails.
4205 				 * If the PRLI was not sent out, then the
4206 				 * other end will time it out.
4207 				 */
4208 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4209 					return (FC_SUCCESS);
4210 				}
4211 				return (FC_UNCLAIMED);
4212 				/* NOTREACHED */
4213 
4214 			default:
4215 				break;
4216 			}
4217 		}
4218 		/* FALLTHROUGH */
4219 
4220 	default:
4221 		return (FC_UNCLAIMED);
4222 	}
4223 }
4224 
4225 
4226 /*ARGSUSED*/
4227 static int
4228 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4229     uint32_t claimed)
4230 {
4231 	return (FC_UNCLAIMED);
4232 }
4233 
4234 /*
4235  *     Function: fcp_statec_callback
4236  *
4237  *  Description: The purpose of this function is to handle a port state change.
4238  *		 It is called from fp/fctl and, in a few instances, internally.
4239  *
4240  *     Argument: ulph		fp/fctl port handle
4241  *		 port_handle	fcp_port structure
4242  *		 port_state	Physical state of the port
4243  *		 port_top	Topology
4244  *		 *devlist	Pointer to the first entry of a table
4245  *				containing the remote ports that can be
4246  *				reached.
4247  *		 dev_cnt	Number of entries pointed by devlist.
4248  *		 port_sid	Port ID of the local port.
4249  *
4250  * Return Value: None
4251  */
4252 /*ARGSUSED*/
4253 static void
4254 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4255     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4256     uint32_t dev_cnt, uint32_t port_sid)
4257 {
4258 	uint32_t		link_count;
4259 	int			map_len = 0;
4260 	struct fcp_port	*pptr;
4261 	fcp_map_tag_t		*map_tag = NULL;
4262 
4263 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4264 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4265 		return;			/* nothing to work with! */
4266 	}
4267 
4268 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4269 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4270 	    "fcp_statec_callback: port state/dev_cnt/top ="
4271 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4272 	    dev_cnt, port_top);
4273 
4274 	mutex_enter(&pptr->port_mutex);
4275 
4276 	/*
4277 	 * If a thread is in detach, don't do anything.
4278 	 */
4279 	if (pptr->port_state & (FCP_STATE_DETACHING |
4280 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4281 		mutex_exit(&pptr->port_mutex);
4282 		return;
4283 	}
4284 
4285 	/*
4286 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4287 	 * init_pkt is called, it knows whether or not the target's status
4288 	 * (or pd) might be changing.
4289 	 */
4290 
4291 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4292 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4293 	}
4294 
4295 	/*
	 * the transport doesn't allocate or probe unless asked to
	 * by either the applications or the ULPs
	 *
	 * in cases where the port is OFFLINE at the time of the port
	 * attach callback and the link comes ONLINE later, the
	 * following conditional helps automatic node creation (i.e.
	 * without the user having to go out and run the utility to
	 * perform LOGINs)
4304 	 */
4305 	pptr->port_phys_state = port_state;
4306 
4307 	if (dev_cnt) {
4308 		mutex_exit(&pptr->port_mutex);
4309 
4310 		map_len = sizeof (*map_tag) * dev_cnt;
4311 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4312 		if (map_tag == NULL) {
4313 			fcp_log(CE_WARN, pptr->port_dip,
4314 			    "!fcp%d: failed to allocate for map tags; "
4315 			    " state change will not be processed",
4316 			    pptr->port_instance);
4317 
4318 			mutex_enter(&pptr->port_mutex);
4319 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4320 			mutex_exit(&pptr->port_mutex);
4321 
4322 			return;
4323 		}
4324 
4325 		mutex_enter(&pptr->port_mutex);
4326 	}
4327 
4328 	if (pptr->port_id != port_sid) {
4329 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4330 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4331 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4332 		    port_sid);
4333 		/*
4334 		 * The local port changed ID. It is the first time a port ID
4335 		 * is assigned or something drastic happened.  We might have
4336 		 * been unplugged and replugged on another loop or fabric port
4337 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4338 		 * the fabric we were plugged into.
4339 		 */
4340 		pptr->port_id = port_sid;
4341 	}
4342 
4343 	switch (FC_PORT_STATE_MASK(port_state)) {
4344 	case FC_STATE_OFFLINE:
4345 	case FC_STATE_RESET_REQUESTED:
4346 		/*
		 * the link has gone from online to offline -- mark the LUNs
		 * of this port BUSY and MARKed so they can be taken offline
4349 		 */
4350 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4351 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4352 		    "link went offline");
4353 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4354 			/*
4355 			 * We were offline a while ago and this one
4356 			 * seems to indicate that the loop has gone
4357 			 * dead forever.
4358 			 */
4359 			pptr->port_tmp_cnt += dev_cnt;
4360 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4361 			pptr->port_state |= FCP_STATE_INIT;
4362 			link_count = pptr->port_link_cnt;
4363 			fcp_handle_devices(pptr, devlist, dev_cnt,
4364 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4365 		} else {
4366 			pptr->port_link_cnt++;
4367 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4368 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4369 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4370 			if (pptr->port_mpxio) {
4371 				fcp_update_mpxio_path_verifybusy(pptr);
4372 			}
4373 			pptr->port_state |= FCP_STATE_OFFLINE;
4374 			pptr->port_state &=
4375 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4376 			pptr->port_tmp_cnt = 0;
4377 		}
4378 		mutex_exit(&pptr->port_mutex);
4379 		break;
4380 
4381 	case FC_STATE_ONLINE:
4382 	case FC_STATE_LIP:
4383 	case FC_STATE_LIP_LBIT_SET:
4384 		/*
4385 		 * link has gone from offline to online
4386 		 */
4387 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4388 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4389 		    "link went online");
4390 
4391 		pptr->port_link_cnt++;
4392 
4393 		while (pptr->port_ipkt_cnt) {
4394 			mutex_exit(&pptr->port_mutex);
4395 			delay(drv_usectohz(1000000));
4396 			mutex_enter(&pptr->port_mutex);
4397 		}
4398 
4399 		pptr->port_topology = port_top;
4400 
4401 		/*
4402 		 * The state of the targets and luns accessible through this
4403 		 * port is updated.
4404 		 */
4405 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4406 		    FCP_CAUSE_LINK_CHANGE);
4407 
4408 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4409 		pptr->port_state |= FCP_STATE_ONLINING;
4410 		pptr->port_tmp_cnt = dev_cnt;
4411 		link_count = pptr->port_link_cnt;
4412 
4413 		pptr->port_deadline = fcp_watchdog_time +
4414 		    FCP_ICMD_DEADLINE;
4415 
4416 		if (!dev_cnt) {
4417 			/*
4418 			 * We go directly to the online state if no remote
4419 			 * ports were discovered.
4420 			 */
4421 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4422 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4423 			    "No remote ports discovered");
4424 
4425 			pptr->port_state &= ~FCP_STATE_ONLINING;
4426 			pptr->port_state |= FCP_STATE_ONLINE;
4427 		}
4428 
4429 		switch (port_top) {
4430 		case FC_TOP_FABRIC:
4431 		case FC_TOP_PUBLIC_LOOP:
4432 		case FC_TOP_PRIVATE_LOOP:
4433 		case FC_TOP_PT_PT:
4434 
4435 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4436 				fcp_retry_ns_registry(pptr, port_sid);
4437 			}
4438 
4439 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4440 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4441 			break;
4442 
4443 		default:
4444 			/*
4445 			 * We got here because we were provided with an unknown
4446 			 * topology.
4447 			 */
4448 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4449 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4450 			}
4451 
4452 			pptr->port_tmp_cnt -= dev_cnt;
4453 			fcp_log(CE_WARN, pptr->port_dip,
4454 			    "!unknown/unsupported topology (0x%x)", port_top);
4455 			break;
4456 		}
4457 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4458 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4459 		    "Notify ssd of the reset to reinstate the reservations");
4460 
4461 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4462 		    &pptr->port_reset_notify_listf);
4463 
4464 		mutex_exit(&pptr->port_mutex);
4465 
4466 		break;
4467 
4468 	case FC_STATE_RESET:
4469 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4470 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4471 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4472 		    "RESET state, waiting for Offline/Online state_cb");
4473 		mutex_exit(&pptr->port_mutex);
4474 		break;
4475 
4476 	case FC_STATE_DEVICE_CHANGE:
4477 		/*
4478 		 * We come here when an application has requested
4479 		 * Dynamic node creation/deletion in Fabric connectivity.
4480 		 */
4481 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4482 		    FCP_STATE_INIT)) {
4483 			/*
4484 			 * This case can happen when the FCTL is in the
			 * process of giving us an ONLINE and the host on
4486 			 * the other side issues a PLOGI/PLOGO. Ideally
4487 			 * the state changes should be serialized unless
4488 			 * they are opposite (online-offline).
4489 			 * The transport will give us a final state change
4490 			 * so we can ignore this for the time being.
4491 			 */
4492 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4493 			mutex_exit(&pptr->port_mutex);
4494 			break;
4495 		}
4496 
4497 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4498 			fcp_retry_ns_registry(pptr, port_sid);
4499 		}
4500 
4501 		/*
		 * Extend the deadline under steady-state conditions
		 * to provide more time for the device-change commands
4504 		 */
4505 		if (!pptr->port_ipkt_cnt) {
4506 			pptr->port_deadline = fcp_watchdog_time +
4507 			    FCP_ICMD_DEADLINE;
4508 		}
4509 
4510 		/*
		 * There is another race condition here: if we were in the
		 * ONLINING state and a device in the map logs out, fp will
		 * give another state change as DEVICE_CHANGE with OLD.
		 * This will result in that target being offlined and its
		 * pd_handle being freed.  If, based on the first statec
		 * callback, we were going to fire a PLOGI/PRLI, the system
		 * would panic in fc_ulp_transport with an invalid pd_handle.
		 * The fix is to check the link_cnt before issuing any
		 * command down.
4520 		 */
4521 		fcp_update_targets(pptr, devlist, dev_cnt,
4522 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4523 
4524 		link_count = pptr->port_link_cnt;
4525 
4526 		fcp_handle_devices(pptr, devlist, dev_cnt,
4527 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4528 
4529 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4530 
4531 		mutex_exit(&pptr->port_mutex);
4532 		break;
4533 
4534 	case FC_STATE_TARGET_PORT_RESET:
4535 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4536 			fcp_retry_ns_registry(pptr, port_sid);
4537 		}
4538 
4539 		/* Do nothing else */
4540 		mutex_exit(&pptr->port_mutex);
4541 		break;
4542 
4543 	default:
4544 		fcp_log(CE_WARN, pptr->port_dip,
4545 		    "!Invalid state change=0x%x", port_state);
4546 		mutex_exit(&pptr->port_mutex);
4547 		break;
4548 	}
4549 
4550 	if (map_tag) {
4551 		kmem_free(map_tag, map_len);
4552 	}
4553 }
4554 
4555 /*
4556  *     Function: fcp_handle_devices
4557  *
4558  *  Description: This function updates the devices currently known by
4559  *		 walking the list provided by the caller.  The list passed
4560  *		 by the caller is supposed to be the list of reachable
4561  *		 devices.
4562  *
4563  *     Argument: *pptr		Fcp port structure.
4564  *		 *devlist	Pointer to the first entry of a table
4565  *				containing the remote ports that can be
4566  *				reached.
4567  *		 dev_cnt	Number of entries pointed by devlist.
4568  *		 link_cnt	Link state count.
4569  *		 *map_tag	Array of fcp_map_tag_t structures.
4570  *		 cause		What caused this function to be called.
4571  *
4572  * Return Value: None
4573  *
4574  *	  Notes: The pptr->port_mutex must be held.
4575  */
4576 static void
4577 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4578     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4579 {
4580 	int			i;
4581 	int			check_finish_init = 0;
4582 	fc_portmap_t		*map_entry;
4583 	struct fcp_tgt	*ptgt = NULL;
4584 
4585 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4586 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4587 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4588 
4589 	if (dev_cnt) {
4590 		ASSERT(map_tag != NULL);
4591 	}
4592 
4593 	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (the list walked is the
	 * one provided by the caller, i.e. the list of the remote ports
	 * currently reachable).  It checks whether any of them is already
	 * known by looking for the corresponding target structure based on
	 * the world wide name.  If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Old comment
	 * -----------
	 * Before we drop the port mutex we MUST get the tags updated; this
	 * two-step process is somewhat slow, but more reliable.
4606 	 */
4607 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4608 		map_entry = &(devlist[i]);
4609 
4610 		/*
4611 		 * get ptr to this map entry in our port's
4612 		 * list (if any)
4613 		 */
4614 		ptgt = fcp_lookup_target(pptr,
4615 		    (uchar_t *)&(map_entry->map_pwwn));
4616 
4617 		if (ptgt) {
4618 			map_tag[i] = ptgt->tgt_change_cnt;
4619 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4620 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4621 			}
4622 		}
4623 	}
4624 
4625 	/*
	 * At this point we know which devices of the new list were already
	 * known (the tgt_aux_state field of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hash table).  If a
4632 	 * target is found and is not tagged, it means the target cannot
4633 	 * be reached anymore through the local port (pptr).  It is offlined.
4634 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4635 	 */
4636 	for (i = 0; i < FCP_NUM_HASH; i++) {
4637 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4638 		    ptgt = ptgt->tgt_next) {
4639 			mutex_enter(&ptgt->tgt_mutex);
4640 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4641 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4642 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4643 				fcp_offline_target_now(pptr, ptgt,
4644 				    link_cnt, ptgt->tgt_change_cnt, 0);
4645 			}
4646 			mutex_exit(&ptgt->tgt_mutex);
4647 		}
4648 	}
4649 
4650 	/*
	 * At this point, the devices that were known but can no longer be
	 * reached have most likely been offlined.
	 *
	 * The following section of code goes through the list of remote
	 * ports that can now be reached.  For each one it checks whether
	 * it is already known or whether it is a new port.
4657 	 */
4658 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4659 
4660 		if (check_finish_init) {
4661 			ASSERT(i > 0);
4662 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4663 			    map_tag[i - 1], cause);
4664 			check_finish_init = 0;
4665 		}
4666 
4667 		/* get a pointer to this map entry */
4668 		map_entry = &(devlist[i]);
4669 
4670 		/*
4671 		 * Check for the duplicate map entry flag. If we have marked
4672 		 * this entry as a duplicate we skip it since the correct
4673 		 * (perhaps even same) state change will be encountered
4674 		 * later in the list.
4675 		 */
4676 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4677 			continue;
4678 		}
4679 
4680 		/* get ptr to this map entry in our port's list (if any) */
4681 		ptgt = fcp_lookup_target(pptr,
4682 		    (uchar_t *)&(map_entry->map_pwwn));
4683 
4684 		if (ptgt) {
4685 			/*
4686 			 * This device was already known.  The field
4687 			 * tgt_aux_state is reset (was probably set to
4688 			 * FCP_TGT_TAGGED previously in this routine).
4689 			 */
4690 			ptgt->tgt_aux_state = 0;
4691 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4692 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4693 			    "handle_devices: map did/state/type/flags = "
4694 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4695 			    "tgt_state=%d",
4696 			    map_entry->map_did.port_id, map_entry->map_state,
4697 			    map_entry->map_type, map_entry->map_flags,
4698 			    ptgt->tgt_d_id, ptgt->tgt_state);
4699 		}
4700 
4701 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4702 		    map_entry->map_type == PORT_DEVICE_NEW ||
4703 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4704 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4705 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4706 			    "map_type=%x, did = %x",
4707 			    map_entry->map_type,
4708 			    map_entry->map_did.port_id);
4709 		}
4710 
4711 		switch (map_entry->map_type) {
4712 		case PORT_DEVICE_NOCHANGE:
4713 		case PORT_DEVICE_USER_CREATE:
4714 		case PORT_DEVICE_USER_LOGIN:
4715 		case PORT_DEVICE_NEW:
4716 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4717 
4718 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4719 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4720 			    cause) == TRUE) {
4721 
4722 				FCP_TGT_TRACE(ptgt, map_tag[i],
4723 				    FCP_TGT_TRACE_2);
4724 				check_finish_init++;
4725 			}
4726 			break;
4727 
4728 		case PORT_DEVICE_OLD:
4729 			if (ptgt != NULL) {
4730 				FCP_TGT_TRACE(ptgt, map_tag[i],
4731 				    FCP_TGT_TRACE_3);
4732 
4733 				mutex_enter(&ptgt->tgt_mutex);
4734 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4735 					/*
4736 					 * Must do an in-line wait for I/Os
4737 					 * to get drained
4738 					 */
4739 					mutex_exit(&ptgt->tgt_mutex);
4740 					mutex_exit(&pptr->port_mutex);
4741 
4742 					mutex_enter(&ptgt->tgt_mutex);
4743 					while (ptgt->tgt_ipkt_cnt ||
4744 					    fcp_outstanding_lun_cmds(ptgt)
4745 					    == FC_SUCCESS) {
4746 						mutex_exit(&ptgt->tgt_mutex);
4747 						delay(drv_usectohz(1000000));
4748 						mutex_enter(&ptgt->tgt_mutex);
4749 					}
4750 					mutex_exit(&ptgt->tgt_mutex);
4751 
4752 					mutex_enter(&pptr->port_mutex);
4753 					mutex_enter(&ptgt->tgt_mutex);
4754 
4755 					(void) fcp_offline_target(pptr, ptgt,
4756 					    link_cnt, map_tag[i], 0, 0);
4757 				}
4758 				mutex_exit(&ptgt->tgt_mutex);
4759 			}
4760 			check_finish_init++;
4761 			break;
4762 
4763 		case PORT_DEVICE_USER_DELETE:
4764 		case PORT_DEVICE_USER_LOGOUT:
4765 			if (ptgt != NULL) {
4766 				FCP_TGT_TRACE(ptgt, map_tag[i],
4767 				    FCP_TGT_TRACE_4);
4768 
4769 				mutex_enter(&ptgt->tgt_mutex);
4770 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4771 					(void) fcp_offline_target(pptr, ptgt,
4772 					    link_cnt, map_tag[i], 1, 0);
4773 				}
4774 				mutex_exit(&ptgt->tgt_mutex);
4775 			}
4776 			check_finish_init++;
4777 			break;
4778 
4779 		case PORT_DEVICE_CHANGED:
4780 			if (ptgt != NULL) {
4781 				FCP_TGT_TRACE(ptgt, map_tag[i],
4782 				    FCP_TGT_TRACE_5);
4783 
4784 				if (fcp_device_changed(pptr, ptgt,
4785 				    map_entry, link_cnt, map_tag[i],
4786 				    cause) == TRUE) {
4787 					check_finish_init++;
4788 				}
4789 			} else {
4790 				if (fcp_handle_mapflags(pptr, ptgt,
4791 				    map_entry, link_cnt, 0, cause) == TRUE) {
4792 					check_finish_init++;
4793 				}
4794 			}
4795 			break;
4796 
4797 		default:
4798 			fcp_log(CE_WARN, pptr->port_dip,
4799 			    "!Invalid map_type=0x%x", map_entry->map_type);
4800 			check_finish_init++;
4801 			break;
4802 		}
4803 	}
4804 
4805 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4806 		ASSERT(i > 0);
4807 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4808 		    map_tag[i-1], cause);
4809 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4810 		fcp_offline_all(pptr, link_cnt, cause);
4811 	}
4812 }
4813 
4814 /*
4815  *     Function: fcp_handle_mapflags
4816  *
 *  Description: This function creates a target structure if the ptgt passed
 *		 is NULL.  It also kicks off the PLOGI if we are not logged
 *		 into the target yet or the PRLI if we are logged into the
 *		 target already.  The rest of the processing is done in the
 *		 callbacks of the PLOGI or PRLI.
4822  *
4823  *     Argument: *pptr		FCP Port structure.
4824  *		 *ptgt		Target structure.
4825  *		 *map_entry	Array of fc_portmap_t structures.
4826  *		 link_cnt	Link state count.
4827  *		 tgt_cnt	Target state count.
4828  *		 cause		What caused this function to be called.
4829  *
4830  * Return Value: TRUE	Failed
4831  *		 FALSE	Succeeded
4832  *
4833  *	  Notes: pptr->port_mutex must be owned.
4834  */
4835 static int
4836 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4837     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4838 {
4839 	int			lcount;
4840 	int			tcount;
4841 	int			ret = TRUE;
4842 	int			alloc;
4843 	struct fcp_ipkt	*icmd;
4844 	struct fcp_lun	*pseq_lun = NULL;
4845 	uchar_t			opcode;
4846 	int			valid_ptgt_was_passed = FALSE;
4847 
4848 	ASSERT(mutex_owned(&pptr->port_mutex));
4849 
4850 	/*
	 * This case is possible when the FCTL has come up and done discovery
	 * before FCP was loaded and attached.  FCTL would have discovered the
	 * devices and later the ULP came online.  In this case the ULPs would
	 * get PORT_DEVICE_NOCHANGE but the target would be NULL.
4855 	 */
4856 	if (ptgt == NULL) {
4857 		/* don't already have a target */
4858 		mutex_exit(&pptr->port_mutex);
4859 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4860 		mutex_enter(&pptr->port_mutex);
4861 
4862 		if (ptgt == NULL) {
4863 			fcp_log(CE_WARN, pptr->port_dip,
4864 			    "!FC target allocation failed");
4865 			return (ret);
4866 		}
4867 		mutex_enter(&ptgt->tgt_mutex);
4868 		ptgt->tgt_statec_cause = cause;
4869 		ptgt->tgt_tmp_cnt = 1;
4870 		mutex_exit(&ptgt->tgt_mutex);
4871 	} else {
4872 		valid_ptgt_was_passed = TRUE;
4873 	}
4874 
4875 	/*
4876 	 * Copy in the target parameters
4877 	 */
4878 	mutex_enter(&ptgt->tgt_mutex);
4879 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4880 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4881 	ptgt->tgt_pd_handle = map_entry->map_pd;
4882 	ptgt->tgt_fca_dev = NULL;
4883 
4884 	/* Copy port and node WWNs */
4885 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4886 	    FC_WWN_SIZE);
4887 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4888 	    FC_WWN_SIZE);
4889 
4890 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4891 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4892 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4893 	    valid_ptgt_was_passed) {
4894 		/*
4895 		 * determine if there are any tape LUNs on this target
4896 		 */
4897 		for (pseq_lun = ptgt->tgt_lun;
4898 		    pseq_lun != NULL;
4899 		    pseq_lun = pseq_lun->lun_next) {
4900 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4901 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4902 				fcp_update_tgt_state(ptgt, FCP_RESET,
4903 				    FCP_LUN_MARK);
4904 				mutex_exit(&ptgt->tgt_mutex);
4905 				return (ret);
4906 			}
4907 		}
4908 	}
4909 
4910 	/*
4911 	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never explicitly initialized but was zeroed out, which means
	 * FCP_TGT_NODE_NONE.
4914 	 */
4915 	switch (ptgt->tgt_node_state) {
4916 	case FCP_TGT_NODE_NONE:
4917 	case FCP_TGT_NODE_ON_DEMAND:
4918 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4919 		    !fcp_enable_auto_configuration &&
4920 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4921 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4922 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4923 		    fcp_enable_auto_configuration &&
4924 		    (ptgt->tgt_manual_config_only == 1) &&
4925 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4926 			/*
4927 			 * If auto configuration is set and
4928 			 * the tgt_manual_config_only flag is set then
4929 			 * we only want the user to be able to change
4930 			 * the state through create_on_demand.
4931 			 */
4932 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4933 		} else {
4934 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4935 		}
4936 		break;
4937 
4938 	case FCP_TGT_NODE_PRESENT:
4939 		break;
4940 	}
4941 	/*
4942 	 * If we are booting from a fabric device, make sure we
4943 	 * mark the node state appropriately for this target to be
4944 	 * enumerated
4945 	 */
4946 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
4947 		if (bcmp((caddr_t)pptr->port_boot_wwn,
4948 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
4949 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
4950 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4951 		}
4952 	}
4953 	mutex_exit(&ptgt->tgt_mutex);
4954 
4955 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4956 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4957 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
4958 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
4959 	    map_entry->map_rscn_info.ulp_rscn_count);
4960 
4961 	mutex_enter(&ptgt->tgt_mutex);
4962 
4963 	/*
4964 	 * Reset target OFFLINE state and mark the target BUSY
4965 	 */
4966 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
4967 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
4968 
4969 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
4970 	lcount = link_cnt;
4971 
4972 	mutex_exit(&ptgt->tgt_mutex);
4973 	mutex_exit(&pptr->port_mutex);
4974 
4975 	/*
4976 	 * if we are already logged in, then we do a PRLI, else
4977 	 * we do a PLOGI first (to get logged in)
4978 	 *
4979 	 * We will not check if we are the PLOGI initiator
4980 	 */
4981 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
4982 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
4983 
4984 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
4985 
4986 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount,
4987 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
4988 
4989 	if (icmd == NULL) {
4990 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
4991 		/*
		 * We exited port_mutex before calling fcp_icmd_alloc, so
		 * we need to make sure we reacquire it before returning.
4994 		 */
4995 		mutex_enter(&pptr->port_mutex);
4996 		return (FALSE);
4997 	}
4998 
	/* TRUE is only returned when the target is intentionally skipped */
5000 	ret = FALSE;
5001 	/* discover info about this target */
5002 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5003 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5004 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5005 	} else {
5006 		fcp_icmd_free(pptr, icmd);
5007 		ret = TRUE;
5008 	}
5009 	mutex_enter(&pptr->port_mutex);
5010 
5011 	return (ret);
5012 }
5013 
5014 /*
5015  *     Function: fcp_send_els
5016  *
5017  *  Description: Sends an ELS to the target specified by the caller.  Supports
5018  *		 PLOGI and PRLI.
5019  *
5020  *     Argument: *pptr		Fcp port.
5021  *		 *ptgt		Target to send the ELS to.
5022  *		 *icmd		Internal packet
5023  *		 opcode		ELS opcode
5024  *		 lcount		Link state change counter
5025  *		 tcount		Target state change counter
5026  *		 cause		What caused the call
5027  *
5028  * Return Value: DDI_SUCCESS
5029  *		 Others
5030  */
5031 static int
5032 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5033     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5034 {
5035 	fc_packet_t		*fpkt;
5036 	fc_frame_hdr_t		*hp;
5037 	int			internal = 0;
5038 	int			alloc;
5039 	int			cmd_len;
5040 	int			resp_len;
5041 	int			res = DDI_FAILURE; /* default result */
5042 	int			rval = DDI_FAILURE;
5043 
5044 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5045 	ASSERT(ptgt->tgt_port == pptr);
5046 
5047 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5048 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5049 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5050 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5051 
5052 	if (opcode == LA_ELS_PLOGI) {
5053 		cmd_len = sizeof (la_els_logi_t);
5054 		resp_len = sizeof (la_els_logi_t);
5055 	} else {
5056 		ASSERT(opcode == LA_ELS_PRLI);
5057 		cmd_len = sizeof (la_els_prli_t);
5058 		resp_len = sizeof (la_els_prli_t);
5059 	}
5060 
5061 	if (icmd == NULL) {
5062 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5063 		    sizeof (la_els_prli_t));
5064 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0,
5065 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5066 		if (icmd == NULL) {
5067 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5068 			return (res);
5069 		}
5070 		internal++;
5071 	}
5072 	fpkt = icmd->ipkt_fpkt;
5073 
5074 	fpkt->pkt_cmdlen = cmd_len;
5075 	fpkt->pkt_rsplen = resp_len;
5076 	fpkt->pkt_datalen = 0;
5077 	icmd->ipkt_retries = 0;
5078 
5079 	/* fill in fpkt info */
5080 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5081 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5082 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5083 
5084 	/* get ptr to frame hdr in fpkt */
5085 	hp = &fpkt->pkt_cmd_fhdr;
5086 
5087 	/*
5088 	 * fill in frame hdr
5089 	 */
5090 	hp->r_ctl = R_CTL_ELS_REQ;
5091 	hp->s_id = pptr->port_id;	/* source ID */
5092 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5093 	hp->type = FC_TYPE_EXTENDED_LS;
5094 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5095 	hp->seq_id = 0;
5096 	hp->rsvd = 0;
5097 	hp->df_ctl  = 0;
5098 	hp->seq_cnt = 0;
5099 	hp->ox_id = 0xffff;		/* i.e. none */
5100 	hp->rx_id = 0xffff;		/* i.e. none */
5101 	hp->ro = 0;
5102 
5103 	/*
5104 	 * at this point we have a filled in cmd pkt
5105 	 *
5106 	 * fill in the respective info, then use the transport to send
5107 	 * the packet
5108 	 *
5109 	 * for a PLOGI call fc_ulp_login(), and
5110 	 * for a PRLI call fc_ulp_issue_els()
5111 	 */
5112 	switch (opcode) {
5113 	case LA_ELS_PLOGI: {
5114 		struct la_els_logi logi;
5115 
5116 		bzero(&logi, sizeof (struct la_els_logi));
5117 
5118 		hp = &fpkt->pkt_cmd_fhdr;
5119 		hp->r_ctl = R_CTL_ELS_REQ;
5120 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5121 		logi.ls_code.mbz = 0;
5122 
5123 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5124 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5125 
5126 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5127 
5128 		mutex_enter(&pptr->port_mutex);
5129 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5130 
5131 			mutex_exit(&pptr->port_mutex);
5132 
5133 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5134 			if (rval == FC_SUCCESS) {
5135 				res = DDI_SUCCESS;
5136 				break;
5137 			}
5138 
5139 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5140 
5141 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5142 			    rval, "PLOGI");
5143 		} else {
5144 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5145 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5146 			    "fcp_send_els1: state change occured"
5147 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5148 			mutex_exit(&pptr->port_mutex);
5149 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5150 		}
5151 		break;
5152 	}
5153 
5154 	case LA_ELS_PRLI: {
5155 		struct la_els_prli	prli;
5156 		struct fcp_prli		*fprli;
5157 
5158 		bzero(&prli, sizeof (struct la_els_prli));
5159 
5160 		hp = &fpkt->pkt_cmd_fhdr;
5161 		hp->r_ctl = R_CTL_ELS_REQ;
5162 
5163 		/* fill in PRLI cmd ELS fields */
5164 		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* service parameter page is 16 bytes */
5166 		prli.payload_length = sizeof (struct la_els_prli);
5167 
5168 		icmd->ipkt_opcode = LA_ELS_PRLI;
5169 
5170 		/* get ptr to PRLI service params */
5171 		fprli = (struct fcp_prli *)prli.service_params;
5172 
5173 		/* fill in service params */
		fprli->type = 0x08;		/* FC-4 type code for SCSI FCP */
5175 		fprli->resvd1 = 0;
5176 		fprli->orig_process_assoc_valid = 0;
5177 		fprli->resp_process_assoc_valid = 0;
5178 		fprli->establish_image_pair = 1;
5179 		fprli->resvd2 = 0;
5180 		fprli->resvd3 = 0;
5181 		fprli->obsolete_1 = 0;
5182 		fprli->obsolete_2 = 0;
5183 		fprli->data_overlay_allowed = 0;
5184 		fprli->initiator_fn = 1;
5185 		fprli->confirmed_compl_allowed = 1;
5186 
5187 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5188 			fprli->target_fn = 1;
5189 		} else {
5190 			fprli->target_fn = 0;
5191 		}
5192 
5193 		fprli->retry = 1;
5194 		fprli->read_xfer_rdy_disabled = 1;
5195 		fprli->write_xfer_rdy_disabled = 0;
5196 
5197 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5198 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5199 
5200 		/* issue the PRLI request */
5201 
5202 		mutex_enter(&pptr->port_mutex);
5203 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5204 
5205 			mutex_exit(&pptr->port_mutex);
5206 
5207 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5208 			if (rval == FC_SUCCESS) {
5209 				res = DDI_SUCCESS;
5210 				break;
5211 			}
5212 
5213 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5214 
5215 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5216 			    rval, "PRLI");
5217 		} else {
5218 			mutex_exit(&pptr->port_mutex);
5219 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5220 		}
5221 		break;
5222 	}
5223 
5224 	default:
5225 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5226 		break;
5227 	}
5228 
5229 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5230 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5231 	    "fcp_send_els: returning %d", res);
5232 
5233 	if (res != DDI_SUCCESS) {
5234 		if (internal) {
5235 			fcp_icmd_free(pptr, icmd);
5236 		}
5237 	}
5238 
5239 	return (res);
5240 }
5241 
5242 
5243 /*
 * called internally to update the state of all of the targets and each LUN
 * for this port (i.e. each target known to be attached to this port)
5246  * if they are not already offline
5247  *
5248  * must be called with the port mutex owned
5249  *
5250  * acquires and releases the target mutexes for each target attached
5251  * to this port
5252  */
5253 void
5254 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5255 {
5256 	int i;
5257 	struct fcp_tgt *ptgt;
5258 
5259 	ASSERT(mutex_owned(&pptr->port_mutex));
5260 
5261 	for (i = 0; i < FCP_NUM_HASH; i++) {
5262 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5263 		    ptgt = ptgt->tgt_next) {
5264 			mutex_enter(&ptgt->tgt_mutex);
5265 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5266 			ptgt->tgt_change_cnt++;
5267 			ptgt->tgt_statec_cause = cause;
5268 			ptgt->tgt_tmp_cnt = 1;
5269 			ptgt->tgt_done = 0;
5270 			mutex_exit(&ptgt->tgt_mutex);
5271 		}
5272 	}
5273 }
5274 
5275 
5276 static void
5277 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5278 {
5279 	int i;
5280 	int ndevs;
5281 	struct fcp_tgt *ptgt;
5282 
5283 	ASSERT(mutex_owned(&pptr->port_mutex));
5284 
5285 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5286 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5287 		    ptgt = ptgt->tgt_next) {
5288 			ndevs++;
5289 		}
5290 	}
5291 
5292 	if (ndevs == 0) {
5293 		return;
5294 	}
5295 	pptr->port_tmp_cnt = ndevs;
5296 
5297 	for (i = 0; i < FCP_NUM_HASH; i++) {
5298 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5299 		    ptgt = ptgt->tgt_next) {
5300 			(void) fcp_call_finish_init_held(pptr, ptgt,
5301 			    lcount, ptgt->tgt_change_cnt, cause);
5302 		}
5303 	}
5304 }
5305 
5306 /*
5307  *     Function: fcp_update_tgt_state
5308  *
 *  Description: This function updates the tgt_state field of a target.  That
 *		 field is a bitmap in which each bit can be set or reset
 *		 individually.	The action applied to the target state is also
 *		 applied to all the LUNs belonging to the target (provided the
 *		 LUN is not offline).  A side effect of applying the state
 *		 modification to the target and the LUNs is that the tgt_trace
 *		 field of the target and the lun_trace field of the LUNs are
 *		 set to zero.
5316  *
5317  *
5318  *     Argument: *ptgt	Target structure.
5319  *		 flag	Flag indication what action to apply (set/reset).
5320  *		 state	State bits to update.
5321  *
5322  * Return Value: None
5323  *
5324  *	Context: Interrupt, Kernel or User context.
5325  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5326  *		 calling this function.
5327  */
5328 void
5329 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5330 {
5331 	struct fcp_lun *plun;
5332 
5333 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5334 
5335 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5336 		/* The target is not offline. */
5337 		if (flag == FCP_SET) {
5338 			ptgt->tgt_state |= state;
5339 			ptgt->tgt_trace = 0;
5340 		} else {
5341 			ptgt->tgt_state &= ~state;
5342 		}
5343 
5344 		for (plun = ptgt->tgt_lun; plun != NULL;
5345 		    plun = plun->lun_next) {
5346 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5347 				/* The LUN is not offline. */
5348 				if (flag == FCP_SET) {
5349 					plun->lun_state |= state;
5350 					plun->lun_trace = 0;
5351 				} else {
5352 					plun->lun_state &= ~state;
5353 				}
5354 			}
5355 		}
5356 	}
5357 }
5358 
5359 /*
 *     Function: fcp_update_lun_state
 *
 *  Description: This function updates the lun_state field of a LUN.  That
 *		 field is a bitmap in which each bit can be set or reset
 *		 individually.
5365  *
5366  *     Argument: *plun	LUN structure.
5367  *		 flag	Flag indication what action to apply (set/reset).
5368  *		 state	State bits to update.
5369  *
5370  * Return Value: None
5371  *
5372  *	Context: Interrupt, Kernel or User context.
5373  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5374  *		 calling this function.
5375  */
5376 void
5377 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5378 {
5379 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5380 
5381 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5382 
5383 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5384 		if (flag == FCP_SET) {
5385 			plun->lun_state |= state;
5386 		} else {
5387 			plun->lun_state &= ~state;
5388 		}
5389 	}
5390 }
5391 
5392 /*
5393  *     Function: fcp_get_port
5394  *
 *  Description: This function returns the fcp_port structure from the opaque
 *		 handle passed by the caller.  That opaque handle is the handle
 *		 used by fp/fctl to identify a particular local port and has
 *		 been stored in the corresponding fcp_port structure.  This
 *		 function walks the global list of fcp_port structures until
 *		 it finds one whose port_fp_handle matches the handle passed
 *		 by the caller.  The walk is done while holding
 *		 fcp_global_mutex, which is released before returning.
5404  *
5405  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5406  *				particular port.
5407  *
5408  * Return Value: NULL		Not found.
5409  *		 Not NULL	Pointer to the fcp_port structure.
5410  *
5411  *	Context: Interrupt, Kernel or User context.
5412  */
5413 static struct fcp_port *
5414 fcp_get_port(opaque_t port_handle)
5415 {
5416 	struct fcp_port *pptr;
5417 
5418 	ASSERT(port_handle != NULL);
5419 
5420 	mutex_enter(&fcp_global_mutex);
5421 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5422 		if (pptr->port_fp_handle == port_handle) {
5423 			break;
5424 		}
5425 	}
5426 	mutex_exit(&fcp_global_mutex);
5427 
5428 	return (pptr);
5429 }
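
/*
 * Usage sketch (hypothetical, for illustration only): ULP entry points
 * invoked by fp/fctl receive the opaque port handle and map it back to
 * the per-port soft state before doing any work, e.g.
 *
 *	struct fcp_port	*pptr;
 *
 *	if ((pptr = fcp_get_port(port_handle)) == NULL) {
 *		return (FC_UNCLAIMED);
 *	}
 */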
5430 
5431 
5432 static void
5433 fcp_unsol_callback(fc_packet_t *fpkt)
5434 {
5435 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5436 	struct fcp_port *pptr = icmd->ipkt_port;
5437 
5438 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5439 		caddr_t state, reason, action, expln;
5440 
5441 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5442 		    &action, &expln);
5443 
		fcp_log(CE_WARN, pptr->port_dip,
		    "!couldn't post response to unsolicited request: "
		    "state=%s reason=%s ox_id=%x rx_id=%x",
		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
		    fpkt->pkt_cmd_fhdr.rx_id);
5449 	}
5450 	fcp_icmd_free(pptr, icmd);
5451 }
5452 
5453 
/*
 * Perform general-purpose preparation of a response to an unsolicited
 * request: the source and destination IDs of the inbound frame are swapped
 * and its exchange and sequence identifiers are echoed back.
 */
5457 static void
5458 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5459     uchar_t r_ctl, uchar_t type)
5460 {
5461 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5462 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5463 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5464 	pkt->pkt_cmd_fhdr.type = type;
5465 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5466 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5467 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5468 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5469 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5470 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5471 	pkt->pkt_cmd_fhdr.ro = 0;
5472 	pkt->pkt_cmd_fhdr.rsvd = 0;
5473 	pkt->pkt_comp = fcp_unsol_callback;
5474 	pkt->pkt_pd = NULL;
5475 }
5476 
5477 
5478 /*ARGSUSED*/
5479 static int
5480 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5481 {
5482 	fc_packet_t		*fpkt;
5483 	struct la_els_prli	prli;
5484 	struct fcp_prli		*fprli;
5485 	struct fcp_ipkt	*icmd;
5486 	struct la_els_prli	*from;
5487 	struct fcp_prli		*orig;
5488 	struct fcp_tgt	*ptgt;
5489 	int			tcount = 0;
5490 	int			lcount;
5491 
5492 	from = (struct la_els_prli *)buf->ub_buffer;
5493 	orig = (struct fcp_prli *)from->service_params;
5494 
5495 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5496 	    NULL) {
5497 		mutex_enter(&ptgt->tgt_mutex);
5498 		tcount = ptgt->tgt_change_cnt;
5499 		mutex_exit(&ptgt->tgt_mutex);
5500 	}
5501 	mutex_enter(&pptr->port_mutex);
5502 	lcount = pptr->port_link_cnt;
5503 	mutex_exit(&pptr->port_mutex);
5504 
5505 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5506 	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
5507 	    FC_INVALID_RSCN_COUNT)) == NULL) {
5508 		return (FC_FAILURE);
5509 	}
5510 	fpkt = icmd->ipkt_fpkt;
5511 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5512 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5513 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5514 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5515 	fpkt->pkt_rsplen = 0;
5516 	fpkt->pkt_datalen = 0;
5517 
5518 	icmd->ipkt_opcode = LA_ELS_PRLI;
5519 
5520 	bzero(&prli, sizeof (struct la_els_prli));
5521 	fprli = (struct fcp_prli *)prli.service_params;
5522 	prli.ls_code = LA_ELS_ACC;
5523 	prli.page_length = 0x10;
5524 	prli.payload_length = sizeof (struct la_els_prli);
5525 
5526 	/* fill in service params */
5527 	fprli->type = 0x08;
5528 	fprli->resvd1 = 0;
5529 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5530 	fprli->orig_process_associator = orig->orig_process_associator;
5531 	fprli->resp_process_assoc_valid = 0;
5532 	fprli->establish_image_pair = 1;
5533 	fprli->resvd2 = 0;
5534 	fprli->resvd3 = 0;
5535 	fprli->obsolete_1 = 0;
5536 	fprli->obsolete_2 = 0;
5537 	fprli->data_overlay_allowed = 0;
5538 	fprli->initiator_fn = 1;
5539 	fprli->confirmed_compl_allowed = 1;
5540 
5541 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5542 		fprli->target_fn = 1;
5543 	} else {
5544 		fprli->target_fn = 0;
5545 	}
5546 
5547 	fprli->retry = 1;
5548 	fprli->read_xfer_rdy_disabled = 1;
5549 	fprli->write_xfer_rdy_disabled = 0;
5550 
5551 	/* save the unsol prli payload first */
5552 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5553 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5554 
5555 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5556 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5557 
5558 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5559 
5560 	mutex_enter(&pptr->port_mutex);
5561 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5562 		int rval;
5563 		mutex_exit(&pptr->port_mutex);
5564 
5565 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5566 		    FC_SUCCESS) {
5567 			if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
5568 				fcp_queue_ipkt(pptr, fpkt);
5569 				return (FC_SUCCESS);
5570 			}
5571 			/* Let it timeout */
5572 			fcp_icmd_free(pptr, icmd);
5573 			return (FC_FAILURE);
5574 		}
5575 	} else {
5576 		mutex_exit(&pptr->port_mutex);
5577 		fcp_icmd_free(pptr, icmd);
5578 		return (FC_FAILURE);
5579 	}
5580 
5581 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5582 
5583 	return (FC_SUCCESS);
5584 }
5585 
5586 /*
5587  *     Function: fcp_icmd_alloc
5588  *
 *  Description: This function allocates an fcp_ipkt structure.  The pkt_comp
5590  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5591  *		 modified by the caller (such as fcp_send_scsi).  The
5592  *		 structure is also tied to the state of the line and of the
5593  *		 target at a particular time.  That link is established by
5594  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5595  *		 and tcount which came respectively from pptr->link_cnt and
5596  *		 ptgt->tgt_change_cnt.
5597  *
5598  *     Argument: *pptr		Fcp port.
5599  *		 *ptgt		Target (destination of the command).
5600  *		 cmd_len	Length of the command.
5601  *		 resp_len	Length of the expected response.
5602  *		 data_len	Length of the data.
 *		 nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
5605  *		 lcount		Link state change counter.
5606  *		 tcount		Target state change counter.
 *		 cause		Reason that led to this call.
 *		 rscn_count	RSCN count known at the time of the call, or
 *				FC_INVALID_RSCN_COUNT if none is available.
5608  *
5609  * Return Value: NULL		Failed.
5610  *		 Not NULL	Internal packet address.
5611  */
5612 static struct fcp_ipkt *
5613 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5614     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5615     uint32_t rscn_count)
5616 {
5617 	int			dma_setup = 0;
5618 	fc_packet_t		*fpkt;
5619 	struct fcp_ipkt	*icmd = NULL;
5620 
5621 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5622 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5623 	    KM_NOSLEEP);
5624 	if (icmd == NULL) {
5625 		fcp_log(CE_WARN, pptr->port_dip,
5626 		    "!internal packet allocation failed");
5627 		return (NULL);
5628 	}
5629 
5630 	/*
5631 	 * initialize the allocated packet
5632 	 */
5633 	icmd->ipkt_nodma = nodma;
5634 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5635 	icmd->ipkt_lun = NULL;
5636 
5637 	icmd->ipkt_link_cnt = lcount;
5638 	icmd->ipkt_change_cnt = tcount;
5639 	icmd->ipkt_cause = cause;
5640 
5641 	mutex_enter(&pptr->port_mutex);
5642 	icmd->ipkt_port = pptr;
5643 	mutex_exit(&pptr->port_mutex);
5644 
5645 	/* keep track of amt of data to be sent in pkt */
5646 	icmd->ipkt_cmdlen = cmd_len;
5647 	icmd->ipkt_resplen = resp_len;
5648 	icmd->ipkt_datalen = data_len;
5649 
5650 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5651 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5652 
5653 	/* set pkt's private ptr to point to cmd pkt */
5654 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5655 
5656 	/* set FCA private ptr to memory just beyond */
5657 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5658 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5659 	    pptr->port_dmacookie_sz);
5660 
5661 	/* get ptr to fpkt substruct and fill it in */
5662 	fpkt = icmd->ipkt_fpkt;
5663 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5664 	    sizeof (struct fcp_ipkt));
5665 
5666 	if (ptgt != NULL) {
5667 		icmd->ipkt_tgt = ptgt;
5668 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5669 	}
5670 
5671 	fpkt->pkt_comp = fcp_icmd_callback;
5672 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5673 	fpkt->pkt_cmdlen = cmd_len;
5674 	fpkt->pkt_rsplen = resp_len;
5675 	fpkt->pkt_datalen = data_len;
5676 
5677 	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count, as fcp knows it, down to the transport.  If a valid
	 * count was passed into this function, we allocate memory to actually
	 * pass down this info.
5682 	 *
5683 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5684 	 * basically mean that fcp will not be able to help transport
5685 	 * distinguish if a new RSCN has come after fcp was last informed about
5686 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5687 	 * 5068068 where the device might end up going offline in case of RSCN
5688 	 * storms.
5689 	 */
5690 	fpkt->pkt_ulp_rscn_infop = NULL;
5691 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5692 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5693 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5694 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5695 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5696 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5697 			    "Failed to alloc memory to pass rscn info");
5698 		}
5699 	}
5700 
5701 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5702 		fc_ulp_rscn_info_t	*rscnp;
5703 
5704 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5705 		rscnp->ulp_rscn_count = rscn_count;
5706 	}
5707 
5708 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5709 		goto fail;
5710 	}
5711 	dma_setup++;
5712 
5713 	/*
5714 	 * Must hold target mutex across setting of pkt_pd and call to
5715 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5716 	 * away while we're not looking.
5717 	 */
5718 	if (ptgt != NULL) {
5719 		mutex_enter(&ptgt->tgt_mutex);
5720 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5721 
5722 		/* ask transport to do its initialization on this pkt */
5723 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5724 		    != FC_SUCCESS) {
5725 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5726 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5727 			    "fc_ulp_init_packet failed");
5728 			mutex_exit(&ptgt->tgt_mutex);
5729 			goto fail;
5730 		}
5731 		mutex_exit(&ptgt->tgt_mutex);
5732 	} else {
5733 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5734 		    != FC_SUCCESS) {
5735 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5736 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5737 			    "fc_ulp_init_packet failed");
5738 			goto fail;
5739 		}
5740 	}
5741 
5742 	mutex_enter(&pptr->port_mutex);
5743 	if (pptr->port_state & (FCP_STATE_DETACHING |
5744 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5745 		int rval;
5746 
5747 		mutex_exit(&pptr->port_mutex);
5748 
5749 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5750 		ASSERT(rval == FC_SUCCESS);
5751 
5752 		goto fail;
5753 	}
5754 
5755 	if (ptgt != NULL) {
5756 		mutex_enter(&ptgt->tgt_mutex);
5757 		ptgt->tgt_ipkt_cnt++;
5758 		mutex_exit(&ptgt->tgt_mutex);
5759 	}
5760 
5761 	pptr->port_ipkt_cnt++;
5762 
5763 	mutex_exit(&pptr->port_mutex);
5764 
5765 	return (icmd);
5766 
5767 fail:
5768 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5769 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5770 		    sizeof (fc_ulp_rscn_info_t));
5771 		fpkt->pkt_ulp_rscn_infop = NULL;
5772 	}
5773 
5774 	if (dma_setup) {
5775 		fcp_free_dma(pptr, icmd);
5776 	}
5777 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5778 	    (size_t)pptr->port_dmacookie_sz);
5779 
5780 	return (NULL);
5781 }
5782 
5783 /*
5784  *     Function: fcp_icmd_free
5785  *
5786  *  Description: Frees the internal command passed by the caller.
5787  *
5788  *     Argument: *pptr		Fcp port.
5789  *		 *icmd		Internal packet to free.
5790  *
5791  * Return Value: None
5792  */
5793 static void
5794 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5795 {
5796 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5797 
5798 	/* Let the underlying layers do their cleanup. */
5799 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5800 	    icmd->ipkt_fpkt);
5801 
5802 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5803 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5804 		    sizeof (fc_ulp_rscn_info_t));
5805 	}
5806 
5807 	fcp_free_dma(pptr, icmd);
5808 
5809 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5810 	    (size_t)pptr->port_dmacookie_sz);
5811 
5812 	mutex_enter(&pptr->port_mutex);
5813 
5814 	if (ptgt) {
5815 		mutex_enter(&ptgt->tgt_mutex);
5816 		ptgt->tgt_ipkt_cnt--;
5817 		mutex_exit(&ptgt->tgt_mutex);
5818 	}
5819 
5820 	pptr->port_ipkt_cnt--;
5821 	mutex_exit(&pptr->port_mutex);
5822 }
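
/*
 * Allocation/teardown sketch (hypothetical, for illustration only):
 * an internal packet is always released through fcp_icmd_free() so that
 * the transport uninit, the RSCN info buffer, the DMA resources and the
 * port/target packet counters are unwound together, e.g.
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
 *	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
 *	    FC_INVALID_RSCN_COUNT);
 *	if (icmd == NULL) {
 *		return (FC_FAILURE);
 *	}
 *	...
 *	if (fc_ulp_issue_els(pptr->port_fp_handle,
 *	    icmd->ipkt_fpkt) != FC_SUCCESS) {
 *		fcp_icmd_free(pptr, icmd);
 *		return (FC_FAILURE);
 *	}
 */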
5823 
5824 /*
5825  *     Function: fcp_alloc_dma
5826  *
 *  Description: Allocates the DMA resources required for the internal
5828  *		 packet.
5829  *
5830  *     Argument: *pptr	FCP port.
5831  *		 *icmd	Internal FCP packet.
5832  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5833  *		 flags	Allocation flags (Sleep or NoSleep).
5834  *
5835  * Return Value: FC_SUCCESS
5836  *		 FC_NOMEM
5837  */
5838 static int
5839 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5840     int nodma, int flags)
5841 {
5842 	int		rval;
5843 	size_t		real_size;
5844 	uint_t		ccount;
5845 	int		bound = 0;
5846 	int		cmd_resp = 0;
5847 	fc_packet_t	*fpkt;
5848 	ddi_dma_cookie_t	pkt_data_cookie;
5849 	ddi_dma_cookie_t	*cp;
5850 	uint32_t		cnt;
5851 
5852 	fpkt = &icmd->ipkt_fc_packet;
5853 
5854 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5855 	    fpkt->pkt_resp_dma == NULL);
5856 
5857 	icmd->ipkt_nodma = nodma;
5858 
5859 	if (nodma) {
5860 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5861 		if (fpkt->pkt_cmd == NULL) {
5862 			goto fail;
5863 		}
5864 
5865 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5866 		if (fpkt->pkt_resp == NULL) {
5867 			goto fail;
5868 		}
5869 	} else {
5870 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5871 
5872 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5873 		if (rval == FC_FAILURE) {
5874 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5875 			    fpkt->pkt_resp_dma == NULL);
5876 			goto fail;
5877 		}
5878 		cmd_resp++;
5879 	}
5880 
5881 	if (fpkt->pkt_datalen != 0) {
5882 		/*
5883 		 * set up DMA handle and memory for the data in this packet
5884 		 */
5885 		if (ddi_dma_alloc_handle(pptr->port_dip,
5886 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
5887 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
5888 			goto fail;
5889 		}
5890 
5891 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
5892 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
5893 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
5894 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
5895 			goto fail;
5896 		}
5897 
		/* did we get less DMA memory than we asked for? */
5899 		if (real_size < fpkt->pkt_datalen) {
5900 			goto fail;
5901 		}
5902 
5903 		/* bind DMA address and handle together */
5904 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
5905 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
5906 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
5907 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
5908 			goto fail;
5909 		}
5910 		bound++;
5911 
5912 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
5913 			goto fail;
5914 		}
5915 
5916 		fpkt->pkt_data_cookie_cnt = ccount;
5917 
5918 		cp = fpkt->pkt_data_cookie;
5919 		*cp = pkt_data_cookie;
5920 		cp++;
5921 
5922 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
5923 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
5924 			    &pkt_data_cookie);
5925 			*cp = pkt_data_cookie;
5926 		}
5927 
5928 	}
5929 
5930 	return (FC_SUCCESS);
5931 
5932 fail:
5933 	if (bound) {
5934 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5935 	}
5936 
5937 	if (fpkt->pkt_data_dma) {
5938 		if (fpkt->pkt_data) {
5939 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5940 		}
5941 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5942 	}
5943 
5944 	if (nodma) {
5945 		if (fpkt->pkt_cmd) {
5946 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
5947 		}
5948 		if (fpkt->pkt_resp) {
5949 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
5950 		}
5951 	} else {
5952 		if (cmd_resp) {
5953 			fcp_free_cmd_resp(pptr, fpkt);
5954 		}
5955 	}
5956 
5957 	return (FC_NOMEM);
5958 }
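
/*
 * Note (illustration only): whether kmem buffers or DMA buffers are used
 * for the command and response is decided by the caller based on the
 * port's DVMA space, e.g. fcp_send_scsi() below computes
 *
 *	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
 */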
5959 
5960 
5961 static void
5962 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5963 {
5964 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
5965 
5966 	if (fpkt->pkt_data_dma) {
5967 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5968 		if (fpkt->pkt_data) {
5969 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5970 		}
5971 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5972 	}
5973 
5974 	if (icmd->ipkt_nodma) {
5975 		if (fpkt->pkt_cmd) {
5976 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
5977 		}
5978 		if (fpkt->pkt_resp) {
5979 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
5980 		}
5981 	} else {
5982 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
5983 
5984 		fcp_free_cmd_resp(pptr, fpkt);
5985 	}
5986 }
5987 
5988 /*
5989  *     Function: fcp_lookup_target
5990  *
5991  *  Description: Finds a target given a WWN.
5992  *
5993  *     Argument: *pptr	FCP port.
5994  *		 *wwn	World Wide Name of the device to look for.
5995  *
5996  * Return Value: NULL		No target found
5997  *		 Not NULL	Target structure
5998  *
5999  *	Context: Interrupt context.
6000  *		 The mutex pptr->port_mutex must be owned.
6001  */
6002 /* ARGSUSED */
6003 static struct fcp_tgt *
6004 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6005 {
6006 	int			hash;
6007 	struct fcp_tgt	*ptgt;
6008 
6009 	ASSERT(mutex_owned(&pptr->port_mutex));
6010 
6011 	hash = FCP_HASH(wwn);
6012 
6013 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6014 	    ptgt = ptgt->tgt_next) {
6015 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6016 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6017 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6018 			break;
6019 		}
6020 	}
6021 
6022 	return (ptgt);
6023 }
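
/*
 * Usage sketch (hypothetical, for illustration only): the lookup must be
 * done with the port mutex held, as fcp_get_target_by_did() below does:
 *
 *	mutex_enter(&pptr->port_mutex);
 *	ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
 *	mutex_exit(&pptr->port_mutex);
 */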
6024 
6025 
6026 /*
6027  * Find target structure given a port identifier
6028  */
6029 static struct fcp_tgt *
6030 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6031 {
6032 	fc_portid_t		port_id;
6033 	la_wwn_t		pwwn;
6034 	struct fcp_tgt	*ptgt = NULL;
6035 
6036 	port_id.priv_lilp_posit = 0;
6037 	port_id.port_id = d_id;
6038 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6039 	    &pwwn) == FC_SUCCESS) {
6040 		mutex_enter(&pptr->port_mutex);
6041 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6042 		mutex_exit(&pptr->port_mutex);
6043 	}
6044 
6045 	return (ptgt);
6046 }
6047 
6048 
6049 /*
6050  * the packet completion callback routine for info cmd pkts
6051  *
 * this means fpkt points to a response to either a PLOGI or a PRLI
 *
 * if there is an error, an attempt is made to resend the command that
 * failed
6056  */
6057 static void
6058 fcp_icmd_callback(fc_packet_t *fpkt)
6059 {
6060 	struct fcp_ipkt	*icmd;
6061 	struct fcp_port	*pptr;
6062 	struct fcp_tgt	*ptgt;
6063 	struct la_els_prli	*prli;
6064 	struct la_els_prli	prli_s;
6065 	struct fcp_prli		*fprli;
6066 	struct fcp_lun	*plun;
6067 	int		free_pkt = 1;
6068 	int		rval;
6069 	ls_code_t	resp;
6070 	uchar_t		prli_acc = 0;
6071 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6072 	int		lun0_newalloc;
6073 
6074 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6075 
6076 	/* get ptrs to the port and target structs for the cmd */
6077 	pptr = icmd->ipkt_port;
6078 	ptgt = icmd->ipkt_tgt;
6079 
6080 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6081 
6082 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6083 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6084 		    sizeof (prli_s));
6085 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6086 	}
6087 
6088 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6089 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6090 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6091 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6092 	    ptgt->tgt_d_id);
6093 
6094 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6095 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6096 
6097 		mutex_enter(&ptgt->tgt_mutex);
6098 		if (ptgt->tgt_pd_handle == NULL) {
6099 			/*
			 * in a fabric environment the port device handles
			 * get created only after a successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet;
			 * save it now
6105 			 */
6106 			ASSERT(fpkt->pkt_pd != NULL);
6107 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6108 		}
6109 		mutex_exit(&ptgt->tgt_mutex);
6110 
6111 		/* which ELS cmd is this response for ?? */
6112 		switch (icmd->ipkt_opcode) {
6113 		case LA_ELS_PLOGI:
6114 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6115 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6116 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6117 			    ptgt->tgt_d_id,
6118 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6119 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6120 
6121 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6122 			    FCP_TGT_TRACE_15);
6123 
6124 			/* Note that we are not allocating a new icmd */
6125 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6126 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6127 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6128 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6129 				    FCP_TGT_TRACE_16);
6130 				goto fail;
6131 			}
6132 			break;
6133 
6134 		case LA_ELS_PRLI:
6135 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6136 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6137 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6138 
6139 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6140 			    FCP_TGT_TRACE_17);
6141 
6142 			prli = &prli_s;
6143 
6144 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6145 			    sizeof (prli_s));
6146 
6147 			fprli = (struct fcp_prli *)prli->service_params;
6148 
6149 			mutex_enter(&ptgt->tgt_mutex);
6150 			ptgt->tgt_icap = fprli->initiator_fn;
6151 			ptgt->tgt_tcap = fprli->target_fn;
6152 			mutex_exit(&ptgt->tgt_mutex);
6153 
6154 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6155 				/*
6156 				 * this FCP device does not support target mode
6157 				 */
6158 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6159 				    FCP_TGT_TRACE_18);
6160 				goto fail;
6161 			}
6162 			if (fprli->retry == 1) {
6163 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6164 				    &ptgt->tgt_port_wwn);
6165 			}
6166 
6167 			/* target is no longer offline */
6168 			mutex_enter(&pptr->port_mutex);
6169 			mutex_enter(&ptgt->tgt_mutex);
6170 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6171 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6172 				    FCP_TGT_MARK);
6173 			} else {
6174 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6175 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6178 				mutex_exit(&ptgt->tgt_mutex);
6179 				mutex_exit(&pptr->port_mutex);
6180 				goto fail;
6181 			}
6182 			mutex_exit(&ptgt->tgt_mutex);
6183 			mutex_exit(&pptr->port_mutex);
6184 
6185 			/*
6186 			 * lun 0 should always respond to inquiry, so
6187 			 * get the LUN struct for LUN 0
6188 			 *
			 * Currently we deal with the first level of
			 * addressing.  If / when we start supporting
			 * DTYPE_ARRAY_CTRL device types (i.e. array
			 * controllers), this logic will need revisiting.
6193 			 */
6194 			lun0_newalloc = 0;
6195 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6196 				/*
6197 				 * no LUN struct for LUN 0 yet exists,
6198 				 * so create one
6199 				 */
6200 				plun = fcp_alloc_lun(ptgt);
6201 				if (plun == NULL) {
6202 					fcp_log(CE_WARN, pptr->port_dip,
6203 					    "!Failed to allocate lun 0 for"
6204 					    " D_ID=%x", ptgt->tgt_d_id);
6205 					goto fail;
6206 				}
6207 				lun0_newalloc = 1;
6208 			}
6209 
6210 			/* fill in LUN info */
6211 			mutex_enter(&ptgt->tgt_mutex);
6212 			/*
6213 			 * consider lun 0 as device not connected if it is
6214 			 * offlined or newly allocated
6215 			 */
6216 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6217 			    lun0_newalloc) {
6218 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6219 			}
6220 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6221 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6222 			ptgt->tgt_lun_cnt = 1;
6223 			ptgt->tgt_report_lun_cnt = 0;
6224 			mutex_exit(&ptgt->tgt_mutex);
6225 
6226 			/* Retrieve the rscn count (if a valid one exists) */
6227 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6228 				rscn_count = ((fc_ulp_rscn_info_t *)
6229 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6230 				    ->ulp_rscn_count;
6231 			} else {
6232 				rscn_count = FC_INVALID_RSCN_COUNT;
6233 			}
6234 
6235 			/* send Report Lun request to target */
6236 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6237 			    sizeof (struct fcp_reportlun_resp),
6238 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6239 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6240 				mutex_enter(&pptr->port_mutex);
6241 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6242 					fcp_log(CE_WARN, pptr->port_dip,
6243 					    "!Failed to send REPORT LUN to"
6244 					    "  D_ID=%x", ptgt->tgt_d_id);
6245 				} else {
6246 					FCP_TRACE(fcp_logq,
6247 					    pptr->port_instbuf, fcp_trace,
6248 					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2: state change"
					    " occurred for D_ID=0x%x",
6251 					    ptgt->tgt_d_id);
6252 				}
6253 				mutex_exit(&pptr->port_mutex);
6254 
6255 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6256 				    FCP_TGT_TRACE_19);
6257 
6258 				goto fail;
6259 			} else {
6260 				free_pkt = 0;
6261 				fcp_icmd_free(pptr, icmd);
6262 			}
6263 			break;
6264 
6265 		default:
6266 			fcp_log(CE_WARN, pptr->port_dip,
6267 			    "!fcp_icmd_callback Invalid opcode");
6268 			goto fail;
6269 		}
6270 
6271 		return;
6272 	}
6273 
6274 
6275 	/*
6276 	 * Other PLOGI failures are not retried as the
6277 	 * transport does it already
6278 	 */
6279 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6280 		if (fcp_is_retryable(icmd) &&
6281 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6282 
6283 			if (FCP_MUST_RETRY(fpkt)) {
6284 				fcp_queue_ipkt(pptr, fpkt);
6285 				return;
6286 			}
6287 
6288 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6289 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6290 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6291 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6292 			    fpkt->pkt_reason);
6293 
6294 			/*
6295 			 * Retry by recalling the routine that
6296 			 * originally queued this packet
6297 			 */
6298 			mutex_enter(&pptr->port_mutex);
6299 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6300 				caddr_t msg;
6301 
6302 				mutex_exit(&pptr->port_mutex);
6303 
6304 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6305 
6306 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6307 					fpkt->pkt_timeout +=
6308 					    FCP_TIMEOUT_DELTA;
6309 				}
6310 
6311 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6312 				    fpkt);
6313 				if (rval == FC_SUCCESS) {
6314 					return;
6315 				}
6316 
6317 				if (rval == FC_STATEC_BUSY ||
6318 				    rval == FC_OFFLINE) {
6319 					fcp_queue_ipkt(pptr, fpkt);
6320 					return;
6321 				}
6322 				(void) fc_ulp_error(rval, &msg);
6323 
6324 				fcp_log(CE_NOTE, pptr->port_dip,
6325 				    "!ELS 0x%x failed to d_id=0x%x;"
6326 				    " %s", icmd->ipkt_opcode,
6327 				    ptgt->tgt_d_id, msg);
6328 			} else {
6329 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6330 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6333 				mutex_exit(&pptr->port_mutex);
6334 			}
6335 		}
6336 	} else {
6337 		if (fcp_is_retryable(icmd) &&
6338 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6339 			if (FCP_MUST_RETRY(fpkt)) {
6340 				fcp_queue_ipkt(pptr, fpkt);
6341 				return;
6342 			}
6343 		}
6344 		mutex_enter(&pptr->port_mutex);
6345 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6346 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6347 			mutex_exit(&pptr->port_mutex);
6348 			fcp_print_error(fpkt);
6349 		} else {
6350 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6351 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occurred"
6353 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6354 			mutex_exit(&pptr->port_mutex);
6355 		}
6356 	}
6357 
6358 fail:
6359 	if (free_pkt) {
6360 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6361 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6362 		fcp_icmd_free(pptr, icmd);
6363 	}
6364 }
6365 
6366 
6367 /*
6368  * called internally to send an info cmd using the transport
6369  *
 * sends an INQUIRY, an INQUIRY_PAGE83 or a REPORT_LUN
6371  *
6372  * when the packet is completed fcp_scsi_callback is called
6373  */
6374 static int
6375 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6376     int lcount, int tcount, int cause, uint32_t rscn_count)
6377 {
6378 	int			nodma;
6379 	struct fcp_ipkt		*icmd;
6380 	struct fcp_tgt		*ptgt;
6381 	struct fcp_port		*pptr;
6382 	fc_frame_hdr_t		*hp;
6383 	fc_packet_t		*fpkt;
6384 	struct fcp_cmd		fcp_cmd;
6385 	struct fcp_cmd		*fcmd;
6386 	union scsi_cdb		*scsi_cdb;
6387 
6388 	ASSERT(plun != NULL);
6389 
6390 	ptgt = plun->lun_tgt;
6391 	ASSERT(ptgt != NULL);
6392 
6393 	pptr = ptgt->tgt_port;
6394 	ASSERT(pptr != NULL);
6395 
6396 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6397 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6398 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6399 
6400 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6401 
6402 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6403 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6404 	    rscn_count);
6405 
6406 	if (icmd == NULL) {
6407 		return (DDI_FAILURE);
6408 	}
6409 
6410 	fpkt = icmd->ipkt_fpkt;
6411 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6412 	icmd->ipkt_retries = 0;
6413 	icmd->ipkt_opcode = opcode;
6414 	icmd->ipkt_lun = plun;
6415 
6416 	if (nodma) {
6417 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6418 	} else {
6419 		fcmd = &fcp_cmd;
6420 	}
6421 	bzero(fcmd, sizeof (struct fcp_cmd));
6422 
6423 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6424 
6425 	hp = &fpkt->pkt_cmd_fhdr;
6426 
6427 	hp->s_id = pptr->port_id;
6428 	hp->d_id = ptgt->tgt_d_id;
6429 	hp->r_ctl = R_CTL_COMMAND;
6430 	hp->type = FC_TYPE_SCSI_FCP;
6431 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6432 	hp->rsvd = 0;
6433 	hp->seq_id = 0;
6434 	hp->seq_cnt = 0;
6435 	hp->ox_id = 0xffff;
6436 	hp->rx_id = 0xffff;
6437 	hp->ro = 0;
6438 
6439 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6440 
6441 	/*
6442 	 * Request SCSI target for expedited processing
6443 	 */
6444 
6445 	/*
6446 	 * Set up for untagged queuing because we do not
6447 	 * know if the fibre device supports queuing.
6448 	 */
6449 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6450 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6451 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6452 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6453 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6454 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6455 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6456 
6457 	switch (opcode) {
6458 	case SCMD_INQUIRY_PAGE83:
6459 		/*
6460 		 * Prepare to get the Inquiry VPD page 83 information
6461 		 */
6462 		fcmd->fcp_cntl.cntl_read_data = 1;
6463 		fcmd->fcp_cntl.cntl_write_data = 0;
6464 		fcmd->fcp_data_len = alloc_len;
6465 
6466 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6467 		fpkt->pkt_comp = fcp_scsi_callback;
6468 
6469 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6470 		scsi_cdb->g0_addr2 = 0x01;
6471 		scsi_cdb->g0_addr1 = 0x83;
6472 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6473 		break;
6474 
6475 	case SCMD_INQUIRY:
6476 		fcmd->fcp_cntl.cntl_read_data = 1;
6477 		fcmd->fcp_cntl.cntl_write_data = 0;
6478 		fcmd->fcp_data_len = alloc_len;
6479 
6480 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6481 		fpkt->pkt_comp = fcp_scsi_callback;
6482 
6483 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6484 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6485 		break;
6486 
6487 	case SCMD_REPORT_LUN: {
6488 		fc_portid_t	d_id;
6489 		opaque_t	fca_dev;
6490 
6491 		ASSERT(alloc_len >= 16);
6492 
6493 		d_id.priv_lilp_posit = 0;
6494 		d_id.port_id = ptgt->tgt_d_id;
6495 
6496 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6497 
6498 		mutex_enter(&ptgt->tgt_mutex);
6499 		ptgt->tgt_fca_dev = fca_dev;
6500 		mutex_exit(&ptgt->tgt_mutex);
6501 
6502 		fcmd->fcp_cntl.cntl_read_data = 1;
6503 		fcmd->fcp_cntl.cntl_write_data = 0;
6504 		fcmd->fcp_data_len = alloc_len;
6505 
6506 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6507 		fpkt->pkt_comp = fcp_scsi_callback;
6508 
6509 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6510 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6511 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6512 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6513 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6514 		break;
6515 	}
6516 
6517 	default:
6518 		fcp_log(CE_WARN, pptr->port_dip,
6519 		    "!fcp_send_scsi Invalid opcode");
6520 		break;
6521 	}
6522 
6523 	if (!nodma) {
6524 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6525 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6526 	}
6527 
6528 	mutex_enter(&pptr->port_mutex);
6529 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6530 
6531 		mutex_exit(&pptr->port_mutex);
6532 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6533 		    FC_SUCCESS) {
6534 			fcp_icmd_free(pptr, icmd);
6535 			return (DDI_FAILURE);
6536 		}
6537 		return (DDI_SUCCESS);
6538 	} else {
6539 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6540 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_send_scsi,1: state change occurred"
6542 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6543 		mutex_exit(&pptr->port_mutex);
6544 		fcp_icmd_free(pptr, icmd);
6545 		return (DDI_FAILURE);
6546 	}
6547 }
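
/*
 * Usage sketch (hypothetical, for illustration only): after a successful
 * PRLI, discovery continues by asking LUN 0 for the LUN inventory, as
 * fcp_icmd_callback() does above:
 *
 *	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
 *	    sizeof (struct fcp_reportlun_resp), icmd->ipkt_link_cnt,
 *	    icmd->ipkt_change_cnt, icmd->ipkt_cause,
 *	    rscn_count) != DDI_SUCCESS) {
 *		... treat this as a discovery failure for the target ...
 *	}
 */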
6548 
6549 
6550 /*
 * called by fcp_scsi_callback to handle the case where
 * REPORT_LUN returns an ILLEGAL REQUEST or a UNIT ATTENTION
6553  */
6554 static int
6555 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6556 {
6557 	uchar_t				rqlen;
6558 	int				rval = DDI_FAILURE;
6559 	struct scsi_extended_sense	sense_info, *sense;
6560 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6561 	    fpkt->pkt_ulp_private;
6562 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6563 	struct fcp_port		*pptr = ptgt->tgt_port;
6564 
6565 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6566 
6567 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6568 		/*
6569 		 * SCSI-II Reserve Release support. Some older FC drives return
6570 		 * Reservation conflict for Report Luns command.
6571 		 */
6572 		if (icmd->ipkt_nodma) {
6573 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6574 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6575 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6576 		} else {
6577 			fcp_rsp_t	new_resp;
6578 
6579 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6580 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6581 
6582 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6583 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6584 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6585 
6586 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6587 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6588 		}
6589 
6590 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6591 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6592 
6593 		return (DDI_SUCCESS);
6594 	}
6595 
6596 	sense = &sense_info;
6597 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6598 		/* no need to continue if sense length is not set */
6599 		return (rval);
6600 	}
6601 
6602 	/* casting 64-bit integer to 8-bit */
6603 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6604 	    sizeof (struct scsi_extended_sense));
6605 
6606 	if (rqlen < 14) {
6607 		/* no need to continue if request length isn't long enough */
6608 		return (rval);
6609 	}
6610 
6611 	if (icmd->ipkt_nodma) {
6612 		/*
6613 		 * We can safely use fcp_response_len here since the
6614 		 * only path that calls fcp_check_reportlun,
6615 		 * fcp_scsi_callback, has already called
6616 		 * fcp_validate_fcp_response.
6617 		 */
6618 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6619 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6620 	} else {
6621 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6622 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6623 		    sizeof (struct scsi_extended_sense));
6624 	}
6625 
6626 	if (!FCP_SENSE_NO_LUN(sense)) {
6627 		mutex_enter(&ptgt->tgt_mutex);
6628 		/* clear the flag if any */
6629 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6630 		mutex_exit(&ptgt->tgt_mutex);
6631 	}
6632 
6633 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6634 	    (sense->es_add_code == 0x20)) {
6635 		if (icmd->ipkt_nodma) {
6636 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6637 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6638 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6639 		} else {
6640 			fcp_rsp_t	new_resp;
6641 
6642 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6643 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6644 
6645 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6646 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6647 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6648 
6649 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6650 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6651 		}
6652 
6653 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6654 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6655 
6656 		return (DDI_SUCCESS);
6657 	}
6658 
6659 	/*
	 * This is for the STK library, which returns a check condition
	 * to indicate the device is not ready and manual assistance is
	 * needed.  This happens in response to a report lun command when
	 * the door is open.
6663 	 */
6664 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6665 		if (icmd->ipkt_nodma) {
6666 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6667 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6668 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6669 		} else {
6670 			fcp_rsp_t	new_resp;
6671 
6672 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6673 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6674 
6675 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6676 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6677 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6678 
6679 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6680 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6681 		}
6682 
6683 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6684 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6685 
6686 		return (DDI_SUCCESS);
6687 	}
6688 
6689 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6690 	    (FCP_SENSE_NO_LUN(sense))) {
6691 		mutex_enter(&ptgt->tgt_mutex);
6692 		if ((FCP_SENSE_NO_LUN(sense)) &&
6693 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6694 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6695 			mutex_exit(&ptgt->tgt_mutex);
6696 			/*
			 * reconfig was triggered by ILLEGAL REQUEST but
6698 			 * got ILLEGAL REQUEST again
6699 			 */
6700 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6701 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6702 			    "!FCP: Unable to obtain Report Lun data"
6703 			    " target=%x", ptgt->tgt_d_id);
6704 		} else {
6705 			if (ptgt->tgt_tid == NULL) {
6706 				timeout_id_t	tid;
6707 				/*
6708 				 * REPORT LUN data has changed.	 Kick off
6709 				 * rediscovery
6710 				 */
6711 				tid = timeout(fcp_reconfigure_luns,
6712 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6713 
6714 				ptgt->tgt_tid = tid;
6715 				ptgt->tgt_state |= FCP_TGT_BUSY;
6716 			}
6717 			if (FCP_SENSE_NO_LUN(sense)) {
6718 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6719 			}
6720 			mutex_exit(&ptgt->tgt_mutex);
6721 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6722 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6723 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6724 				    "!FCP:Report Lun Has Changed"
6725 				    " target=%x", ptgt->tgt_d_id);
6726 			} else if (FCP_SENSE_NO_LUN(sense)) {
6727 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6728 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6729 				    "!FCP:LU Not Supported"
6730 				    " target=%x", ptgt->tgt_d_id);
6731 			}
6732 		}
6733 		rval = DDI_SUCCESS;
6734 	}
6735 
6736 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6737 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6738 	    "D_ID=%x, sense=%x, status=%x",
6739 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6740 	    rsp->fcp_u.fcp_status.scsi_status);
6741 
6742 	return (rval);
6743 }
6744 
6745 /*
6746  *     Function: fcp_scsi_callback
6747  *
 *  Description: This is the callback routine set by fcp_send_scsi() after
 *		 it calls fcp_icmd_alloc().  The SCSI commands completed
 *		 here, all autogenerated by FCP, are REPORT_LUN, INQUIRY and
 *		 INQUIRY_PAGE83.
6752  *
6753  *     Argument: *fpkt	 FC packet used to convey the command
6754  *
6755  * Return Value: None
6756  */
6757 static void
6758 fcp_scsi_callback(fc_packet_t *fpkt)
6759 {
6760 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6761 	    fpkt->pkt_ulp_private;
6762 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6763 	struct fcp_port	*pptr;
6764 	struct fcp_tgt	*ptgt;
6765 	struct fcp_lun	*plun;
6766 	struct fcp_rsp		response, *rsp;
6767 
6768 	if (icmd->ipkt_nodma) {
6769 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6770 	} else {
6771 		rsp = &response;
6772 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6773 		    sizeof (struct fcp_rsp));
6774 	}
6775 
6776 	ptgt = icmd->ipkt_tgt;
6777 	pptr = ptgt->tgt_port;
6778 	plun = icmd->ipkt_lun;
6779 
6780 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6781 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6782 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6783 	    "status=%x, lun num=%x",
6784 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6785 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6786 
6787 	/*
	 * Pre-init the LUN GUID with the NWWN if this is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant.  Although using an NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the target
	 * in this case.
6793 	 */
6794 	if ((plun->lun_guid_size == 0) &&
6795 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6796 	    (fcp_symmetric_device_probe(plun) == 0)) {
6797 
6798 		char ascii_wwn[FC_WWN_SIZE*2+1];
6799 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6800 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6801 	}
6802 
6803 	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when they have more data than what is asked for in the CDB.  An
	 * overrun really means that FCP_DL is smaller than the data length
	 * in the CDB.  In this case we know that the REPORT LUN command we
	 * formed within this driver has a correct FCP_DL, so this OVERRUN is
	 * due to bad device behavior.  In reality this is FC_SUCCESS.
6810 	 */
6811 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6812 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6813 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6814 		fpkt->pkt_state = FC_PKT_SUCCESS;
6815 	}
6816 
6817 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6818 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6819 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6820 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6821 		    ptgt->tgt_d_id);
6822 
6823 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6824 			/*
6825 			 * Inquiry VPD page command on A5K SES devices would
6826 			 * result in data CRC errors.
6827 			 */
6828 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6829 				(void) fcp_handle_page83(fpkt, icmd, 1);
6830 				return;
6831 			}
6832 		}
6833 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6834 		    FCP_MUST_RETRY(fpkt)) {
6835 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6836 			fcp_retry_scsi_cmd(fpkt);
6837 			return;
6838 		}
6839 
6840 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6841 		    FCP_TGT_TRACE_20);
6842 
6843 		mutex_enter(&pptr->port_mutex);
6844 		mutex_enter(&ptgt->tgt_mutex);
6845 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6846 			mutex_exit(&ptgt->tgt_mutex);
6847 			mutex_exit(&pptr->port_mutex);
6848 			fcp_print_error(fpkt);
6849 		} else {
6850 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6851 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occurred"
6853 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6854 			mutex_exit(&ptgt->tgt_mutex);
6855 			mutex_exit(&pptr->port_mutex);
6856 		}
6857 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6858 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6859 		fcp_icmd_free(pptr, icmd);
6860 		return;
6861 	}
6862 
6863 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
6864 
6865 	mutex_enter(&pptr->port_mutex);
6866 	mutex_enter(&ptgt->tgt_mutex);
6867 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6868 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6869 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occurred"
6871 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6872 		mutex_exit(&ptgt->tgt_mutex);
6873 		mutex_exit(&pptr->port_mutex);
6874 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6875 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6876 		fcp_icmd_free(pptr, icmd);
6877 		return;
6878 	}
6879 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
6880 
6881 	mutex_exit(&ptgt->tgt_mutex);
6882 	mutex_exit(&pptr->port_mutex);
6883 
6884 	if (icmd->ipkt_nodma) {
6885 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
6886 		    sizeof (struct fcp_rsp));
6887 	} else {
6888 		bep = &fcp_rsp_err;
6889 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
6890 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
6891 	}
6892 
6893 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
6894 		fcp_retry_scsi_cmd(fpkt);
6895 		return;
6896 	}
6897 
6898 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
6899 	    FCP_NO_FAILURE) {
6900 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6901 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6902 		    "rsp_code=0x%x, rsp_len_set=0x%x",
6903 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
6904 		fcp_retry_scsi_cmd(fpkt);
6905 		return;
6906 	}
6907 
6908 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
6909 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
6910 		fcp_queue_ipkt(pptr, fpkt);
6911 		return;
6912 	}
6913 
6914 	/*
	 * Devices that do not support INQUIRY_PAGE83 return a check condition
	 * with ILLEGAL REQUEST, as per the SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We ideally want to enumerate these devices as non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
6920 	 */
6921 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6922 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
6923 
6924 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6925 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
6926 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
6927 		    "check condition. May enumerate as non-mpxio device",
6928 		    ptgt->tgt_d_id, plun->lun_type);
6929 
6930 		/*
6931 		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepancy in that the other
6933 		 * internal FC disks will get enumerated as mpxio devices.
6934 		 * Applications like luxadm expect this to be consistent.
6935 		 *
6936 		 * So, we put in a hack here to check if this is an SES device
6937 		 * and handle it here.
6938 		 */
6939 		if (plun->lun_type == DTYPE_ESI) {
6940 			/*
			 * Since pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID, which is not there
			 * anyway.
6945 			 */
6946 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
6947 			(void) fcp_handle_page83(fpkt, icmd, 1);
6948 			return;
6949 		}
6950 
6951 		mutex_enter(&ptgt->tgt_mutex);
6952 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
6953 		    FCP_LUN_MARK | FCP_LUN_BUSY);
6954 		mutex_exit(&ptgt->tgt_mutex);
6955 
6956 		(void) fcp_call_finish_init(pptr, ptgt,
6957 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6958 		    icmd->ipkt_cause);
6959 		fcp_icmd_free(pptr, icmd);
6960 		return;
6961 	}
6962 
6963 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6964 		int rval = DDI_FAILURE;
6965 
6966 		/*
6967 		 * handle cases where report lun isn't supported
6968 		 * by faking up our own REPORT_LUN response or
6969 		 * UNIT ATTENTION
6970 		 */
6971 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
6972 			rval = fcp_check_reportlun(rsp, fpkt);
6973 
6974 			/*
6975 			 * fcp_check_reportlun might have modified the
6976 			 * FCP response. Copy it in again to get an updated
6977 			 * FCP response
6978 			 */
6979 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
6980 				rsp = &response;
6981 
6982 				FCP_CP_IN(fpkt->pkt_resp, rsp,
6983 				    fpkt->pkt_resp_acc,
6984 				    sizeof (struct fcp_rsp));
6985 			}
6986 		}
6987 
6988 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6989 			if (rval == DDI_SUCCESS) {
6990 				(void) fcp_call_finish_init(pptr, ptgt,
6991 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6992 				    icmd->ipkt_cause);
6993 				fcp_icmd_free(pptr, icmd);
6994 			} else {
6995 				fcp_retry_scsi_cmd(fpkt);
6996 			}
6997 
6998 			return;
6999 		}
7000 	} else {
7001 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7002 			mutex_enter(&ptgt->tgt_mutex);
7003 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7004 			mutex_exit(&ptgt->tgt_mutex);
7005 		}
7006 	}
7007 
7008 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7009 
7010 	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);
7011 
7012 	switch (icmd->ipkt_opcode) {
7013 	case SCMD_INQUIRY:
7014 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7015 		fcp_handle_inquiry(fpkt, icmd);
7016 		break;
7017 
7018 	case SCMD_REPORT_LUN:
7019 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7020 		    FCP_TGT_TRACE_22);
7021 		fcp_handle_reportlun(fpkt, icmd);
7022 		break;
7023 
7024 	case SCMD_INQUIRY_PAGE83:
7025 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7026 		(void) fcp_handle_page83(fpkt, icmd, 0);
7027 		break;
7028 
7029 	default:
7030 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7031 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7032 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7033 		fcp_icmd_free(pptr, icmd);
7034 		break;
7035 	}
7036 }
7037 
7038 
7039 static void
7040 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7041 {
7042 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7043 	    fpkt->pkt_ulp_private;
7044 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7045 	struct fcp_port	*pptr = ptgt->tgt_port;
7046 
7047 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7048 	    fcp_is_retryable(icmd)) {
7049 		mutex_enter(&pptr->port_mutex);
7050 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7051 			mutex_exit(&pptr->port_mutex);
7052 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7053 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7054 			    "Retrying %s to %x; state=%x, reason=%x",
7055 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7056 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7057 			    fpkt->pkt_state, fpkt->pkt_reason);
7058 
7059 			fcp_queue_ipkt(pptr, fpkt);
7060 		} else {
7061 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7062 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_retry_scsi_cmd,1: state change occurred"
7064 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7065 			mutex_exit(&pptr->port_mutex);
7066 			(void) fcp_call_finish_init(pptr, ptgt,
7067 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7068 			    icmd->ipkt_cause);
7069 			fcp_icmd_free(pptr, icmd);
7070 		}
7071 	} else {
7072 		fcp_print_error(fpkt);
7073 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7074 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7075 		fcp_icmd_free(pptr, icmd);
7076 	}
7077 }
7078 
7079 /*
7080  *     Function: fcp_handle_page83
7081  *
7082  *  Description: Treats the response to INQUIRY_PAGE83.
7083  *
7084  *     Argument: *fpkt	FC packet used to convey the command.
7085  *		 *icmd	Original fcp_ipkt structure.
7086  *		 ignore_page83_data
 *			if it is 1, this is a special device's page83
 *			response and the device should be enumerated
 *			under mpxio
7089  *
7090  * Return Value: None
7091  */
7092 static void
7093 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7094     int ignore_page83_data)
7095 {
7096 	struct fcp_port	*pptr;
7097 	struct fcp_lun	*plun;
7098 	struct fcp_tgt	*ptgt;
7099 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7100 	int			fail = 0;
7101 	ddi_devid_t		devid;
7102 	char			*guid = NULL;
7103 	int			ret;
7104 
7105 	ASSERT(icmd != NULL && fpkt != NULL);
7106 
7107 	pptr = icmd->ipkt_port;
7108 	ptgt = icmd->ipkt_tgt;
7109 	plun = icmd->ipkt_lun;
7110 
7111 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7112 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7113 
7114 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7115 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7116 
7117 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7118 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7119 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7120 		    "dtype=0x%x, lun num=%x",
7121 		    pptr->port_instance, ptgt->tgt_d_id,
7122 		    dev_id_page[0], plun->lun_num);
7123 
7124 		ret = ddi_devid_scsi_encode(
7125 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7126 		    NULL,		/* driver name */
7127 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7128 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7129 		    NULL,		/* page 80 data */
7130 		    0,		/* page 80 len */
7131 		    dev_id_page,	/* page 83 data */
7132 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7133 		    &devid);
7134 
7135 		if (ret == DDI_SUCCESS) {
7136 
7137 			guid = ddi_devid_to_guid(devid);
7138 
7139 			if (guid) {
7140 				/*
7141 				 * Check our current guid.  If it's non null
7142 				 * and it has changed, we need to copy it into
7143 				 * lun_old_guid since we might still need it.
7144 				 */
7145 				if (plun->lun_guid &&
7146 				    strcmp(guid, plun->lun_guid)) {
7147 					unsigned int len;
7148 
7149 					/*
7150 					 * If the guid of the LUN changes,
7151 					 * reconfiguration should be triggered
7152 					 * to reflect the changes.
7153 					 * i.e. we should offline the LUN with
7154 					 * the old guid, and online the LUN with
7155 					 * the new guid.
7156 					 */
7157 					plun->lun_state |= FCP_LUN_CHANGED;
7158 
7159 					if (plun->lun_old_guid) {
7160 						kmem_free(plun->lun_old_guid,
7161 						    plun->lun_old_guid_size);
7162 					}
7163 
7164 					len = plun->lun_guid_size;
7165 					plun->lun_old_guid_size = len;
7166 
7167 					plun->lun_old_guid = kmem_zalloc(len,
7168 					    KM_NOSLEEP);
7169 
7170 					if (plun->lun_old_guid) {
						/*
						 * The allocation succeeded;
						 * save a copy of the old GUID.
						 */
7175 						bcopy(plun->lun_guid,
7176 						    plun->lun_old_guid, len);
7177 					} else {
7178 						fail = 1;
7179 						plun->lun_old_guid_size = 0;
7180 					}
7181 				}
7182 				if (!fail) {
7183 					if (fcp_copy_guid_2_lun_block(
7184 					    plun, guid)) {
7185 						fail = 1;
7186 					}
7187 				}
7188 				ddi_devid_free_guid(guid);
7189 
7190 			} else {
7191 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7193 				    "fcp_handle_page83: unable to create "
7194 				    "GUID");
7195 
7196 				/* couldn't create good guid from devid */
7197 				fail = 1;
7198 			}
7199 			ddi_devid_free(devid);
7200 
7201 		} else if (ret == DDI_NOT_WELL_FORMED) {
7202 			/* NULL filled data for page 83 */
7203 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7204 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7205 			    "fcp_handle_page83: retry GUID");
7206 
7207 			icmd->ipkt_retries = 0;
7208 			fcp_retry_scsi_cmd(fpkt);
7209 			return;
7210 		} else {
7211 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7212 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7213 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7214 			    ret);
			/*
			 * Page83 validation was introduced late, so be
			 * tolerant of existing devices already known to
			 * work under mpxio, such as the A5200's SES device.
			 * Their page83 responses are not standard-compliant,
			 * but we still want them enumerated under mpxio.
			 */
7224 			if (fcp_symmetric_device_probe(plun) != 0) {
7225 				fail = 1;
7226 			}
7227 		}
7228 
7229 	} else {
7230 		/* bad packet state */
7231 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7232 
		/*
		 * Some special devices (the A5K and Daktari SES devices)
		 * must be enumerated under mpxio, otherwise "luxadm dis"
		 * will fail.
		 */
7238 		if (ignore_page83_data) {
7239 			fail = 0;
7240 		} else {
7241 			fail = 1;
7242 		}
7243 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7244 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7245 		    "!Devid page cmd failed. "
		    "fpkt_state: %x fpkt_reason: %x "
7247 		    "ignore_page83: %d",
7248 		    fpkt->pkt_state, fpkt->pkt_reason,
7249 		    ignore_page83_data);
7250 	}
7251 
7252 	mutex_enter(&pptr->port_mutex);
7253 	mutex_enter(&plun->lun_mutex);
	/*
	 * Only update lun_mpxio while lun_cip is still NULL; once a child
	 * node exists, changing lun_mpxio would leave it inconsistent with
	 * lun_cip.
	 */
7258 	if (plun->lun_cip == NULL) {
7259 		/*
7260 		 * If we don't have a guid for this lun it's because we were
7261 		 * unable to glean one from the page 83 response.  Set the
7262 		 * control flag to 0 here to make sure that we don't attempt to
7263 		 * enumerate it under mpxio.
7264 		 */
7265 		if (fail || pptr->port_mpxio == 0) {
7266 			plun->lun_mpxio = 0;
7267 		} else {
7268 			plun->lun_mpxio = 1;
7269 		}
7270 	}
7271 	mutex_exit(&plun->lun_mutex);
7272 	mutex_exit(&pptr->port_mutex);
7273 
7274 	mutex_enter(&ptgt->tgt_mutex);
7275 	plun->lun_state &=
7276 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7277 	mutex_exit(&ptgt->tgt_mutex);
7278 
7279 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7280 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7281 
7282 	fcp_icmd_free(pptr, icmd);
7283 }
7284 
7285 /*
7286  *     Function: fcp_handle_inquiry
7287  *
7288  *  Description: Called by fcp_scsi_callback to handle the response to an
7289  *		 INQUIRY request.
7290  *
7291  *     Argument: *fpkt	FC packet used to convey the command.
7292  *		 *icmd	Original fcp_ipkt structure.
7293  *
7294  * Return Value: None
7295  */
7296 static void
7297 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7298 {
7299 	struct fcp_port	*pptr;
7300 	struct fcp_lun	*plun;
7301 	struct fcp_tgt	*ptgt;
7302 	uchar_t		dtype;
7303 	uchar_t		pqual;
7304 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7305 
7306 	ASSERT(icmd != NULL && fpkt != NULL);
7307 
7308 	pptr = icmd->ipkt_port;
7309 	ptgt = icmd->ipkt_tgt;
7310 	plun = icmd->ipkt_lun;
7311 
7312 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7313 	    sizeof (struct scsi_inquiry));
7314 
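	/*
	 * The first byte of the standard INQUIRY data packs the peripheral
	 * qualifier into bits 7-5 and the peripheral device type into bits
	 * 4-0.  For example, a value of 0x7f (qualifier 3, dtype 0x1f)
	 * indicates that no device is present at this lun.
	 */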
7315 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7316 	pqual = plun->lun_inq.inq_dtype >> 5;
7317 
7318 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7319 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7320 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7321 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7322 	    plun->lun_num, dtype, pqual);
7323 
7324 	if (pqual != 0) {
7325 		/*
7326 		 * Non-zero peripheral qualifier
7327 		 */
7328 		fcp_log(CE_CONT, pptr->port_dip,
7329 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7330 		    "Device type=0x%x Peripheral qual=0x%x\n",
7331 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7332 
7333 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7334 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7335 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7336 		    "Device type=0x%x Peripheral qual=0x%x\n",
7337 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7338 
7339 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7340 
7341 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7342 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7343 		fcp_icmd_free(pptr, icmd);
7344 		return;
7345 	}
7346 
	/*
	 * If the device is already initialized, check whether the dtype
	 * has changed.  If it has, set FCP_LUN_CHANGED so that
	 * fcp_create_luns() offlines the old device and creates the new
	 * one.  Refer to bug 4764752.
	 */
7353 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7354 		plun->lun_state |= FCP_LUN_CHANGED;
7355 	}
7356 	plun->lun_type = plun->lun_inq.inq_dtype;
7357 
7358 	/*
7359 	 * This code is setting/initializing the throttling in the FCA
7360 	 * driver.
7361 	 */
7362 	mutex_enter(&pptr->port_mutex);
7363 	if (!pptr->port_notify) {
7364 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd;

			/*
			 * Encode the throttle notification: the low byte
			 * carries FC_NOTIFY_THROTTLE and the next byte
			 * carries the FCP_SVE_THROTTLE value.
			 */
			cmd = FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8);
7369 			pptr->port_notify = 1;
7370 			mutex_exit(&pptr->port_mutex);
7371 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7372 			mutex_enter(&pptr->port_mutex);
7373 		}
7374 	}
7375 
7376 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7377 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7378 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1: state change occurred"
7380 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7381 		mutex_exit(&pptr->port_mutex);
7382 
7383 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7384 		(void) fcp_call_finish_init(pptr, ptgt,
7385 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7386 		    icmd->ipkt_cause);
7387 		fcp_icmd_free(pptr, icmd);
7388 		return;
7389 	}
7390 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7391 	mutex_exit(&pptr->port_mutex);
7392 
7393 	/* Retrieve the rscn count (if a valid one exists) */
7394 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7395 		rscn_count = ((fc_ulp_rscn_info_t *)
7396 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7397 	} else {
7398 		rscn_count = FC_INVALID_RSCN_COUNT;
7399 	}

	/*
	 * Read Inquiry VPD Page 0x83 to uniquely
	 * identify this logical unit.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7402 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7403 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7404 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7405 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7406 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7407 		(void) fcp_call_finish_init(pptr, ptgt,
7408 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7409 		    icmd->ipkt_cause);
7410 	}
7411 
7416 	fcp_icmd_free(pptr, icmd);
7417 }
7418 
7419 /*
7420  *     Function: fcp_handle_reportlun
7421  *
7422  *  Description: Called by fcp_scsi_callback to handle the response to a
7423  *		 REPORT_LUN request.
7424  *
7425  *     Argument: *fpkt	FC packet used to convey the command.
7426  *		 *icmd	Original fcp_ipkt structure.
7427  *
7428  * Return Value: None
7429  */
7430 static void
7431 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7432 {
7433 	int				i;
7434 	int				nluns_claimed;
7435 	int				nluns_bufmax;
7436 	int				len;
7437 	uint16_t			lun_num;
7438 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7439 	struct fcp_port			*pptr;
7440 	struct fcp_tgt			*ptgt;
7441 	struct fcp_lun			*plun;
7442 	struct fcp_reportlun_resp	*report_lun;
7443 
7444 	pptr = icmd->ipkt_port;
7445 	ptgt = icmd->ipkt_tgt;
7446 	len = fpkt->pkt_datalen;
7447 
7448 	if ((len < FCP_LUN_HEADER) ||
7449 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7450 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7451 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7452 		fcp_icmd_free(pptr, icmd);
7453 		return;
7454 	}
7455 
7456 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7457 	    fpkt->pkt_datalen);
7458 
7459 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7460 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7461 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7462 	    pptr->port_instance, ptgt->tgt_d_id);
7463 
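	/*
	 * The REPORT_LUN parameter data begins with an 8-byte header (a
	 * 4-byte LUN list length, expressed in bytes, followed by 4
	 * reserved bytes) and is then followed by one 8-byte entry per
	 * LUN.  A list length of 0x18, for instance, describes 3 LUNs.
	 */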
7464 	/*
7465 	 * Get the number of luns (which is supplied as LUNS * 8) the
7466 	 * device claims it has.
7467 	 */
7468 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7469 
7470 	/*
7471 	 * Get the maximum number of luns the buffer submitted can hold.
7472 	 */
7473 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7474 
7475 	/*
7476 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7477 	 */
7478 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7479 		kmem_free(report_lun, len);
7480 
		fcp_log(CE_NOTE, pptr->port_dip, "!Cannot support"
		    " 0x%x LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);
7484 
7485 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7486 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7487 		fcp_icmd_free(pptr, icmd);
7488 		return;
7489 	}
7490 
7491 	/*
7492 	 * If there are more LUNs than we have allocated memory for,
7493 	 * allocate more space and send down yet another report lun if
7494 	 * the maximum number of attempts hasn't been reached.
7495 	 */
7496 	mutex_enter(&ptgt->tgt_mutex);
7497 
7498 	if ((nluns_claimed > nluns_bufmax) &&
7499 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7500 
7501 		struct fcp_lun *plun;
7502 
7503 		ptgt->tgt_report_lun_cnt++;
7504 		plun = ptgt->tgt_lun;
7505 		ASSERT(plun != NULL);
7506 		mutex_exit(&ptgt->tgt_mutex);
7507 
7508 		kmem_free(report_lun, len);
7509 
7510 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7511 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7512 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7513 		    nluns_claimed, ptgt->tgt_d_id);
7514 
7515 		/* Retrieve the rscn count (if a valid one exists) */
7516 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7517 			rscn_count = ((fc_ulp_rscn_info_t *)
7518 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7519 			    ulp_rscn_count;
7520 		} else {
7521 			rscn_count = FC_INVALID_RSCN_COUNT;
7522 		}
7523 
7524 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7525 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7526 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7527 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7528 			(void) fcp_call_finish_init(pptr, ptgt,
7529 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7530 			    icmd->ipkt_cause);
7531 		}
7532 
7533 		fcp_icmd_free(pptr, icmd);
7534 		return;
7535 	}
7536 
7537 	if (nluns_claimed > nluns_bufmax) {
7538 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7539 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7540 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7541 		    "	 Number of LUNs lost=%x",
7542 		    ptgt->tgt_port_wwn.raw_wwn[0],
7543 		    ptgt->tgt_port_wwn.raw_wwn[1],
7544 		    ptgt->tgt_port_wwn.raw_wwn[2],
7545 		    ptgt->tgt_port_wwn.raw_wwn[3],
7546 		    ptgt->tgt_port_wwn.raw_wwn[4],
7547 		    ptgt->tgt_port_wwn.raw_wwn[5],
7548 		    ptgt->tgt_port_wwn.raw_wwn[6],
7549 		    ptgt->tgt_port_wwn.raw_wwn[7],
7550 		    nluns_claimed - nluns_bufmax);
7551 
7552 		nluns_claimed = nluns_bufmax;
7553 	}
7554 	ptgt->tgt_lun_cnt = nluns_claimed;
7555 
7556 	/*
7557 	 * Identify missing LUNs and print warning messages
7558 	 */
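	/*
	 * Each 8-byte LUN entry uses the SAM first-level addressing
	 * format: bits 7-6 of byte 0 select the addressing method, and
	 * the remaining 6 bits of byte 0 together with byte 1 form the
	 * LUN number, which is what the
	 * ((lun_string[0] & 0x3F) << 8) | lun_string[1]
	 * extraction below recovers.
	 */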
7559 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7560 		int offline;
7561 		int exists = 0;
7562 
7563 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7564 
7565 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7566 			uchar_t		*lun_string;
7567 
7568 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7569 
7570 			switch (lun_string[0] & 0xC0) {
7571 			case FCP_LUN_ADDRESSING:
7572 			case FCP_PD_ADDRESSING:
7573 			case FCP_VOLUME_ADDRESSING:
7574 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7575 				    lun_string[1];
7576 				if (plun->lun_num == lun_num) {
7577 					exists++;
7578 					break;
7579 				}
7580 				break;
7581 
7582 			default:
7583 				break;
7584 			}
7585 		}
7586 
7587 		if (!exists && !offline) {
7588 			mutex_exit(&ptgt->tgt_mutex);
7589 
7590 			mutex_enter(&pptr->port_mutex);
7591 			mutex_enter(&ptgt->tgt_mutex);
7592 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7593 				/*
7594 				 * set disappear flag when device was connected
7595 				 */
7596 				if (!(plun->lun_state &
7597 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7598 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7599 				}
7600 				mutex_exit(&ptgt->tgt_mutex);
7601 				mutex_exit(&pptr->port_mutex);
7602 				if (!(plun->lun_state &
7603 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7604 					fcp_log(CE_NOTE, pptr->port_dip,
7605 					    "!Lun=%x for target=%x disappeared",
7606 					    plun->lun_num, ptgt->tgt_d_id);
7607 				}
7608 				mutex_enter(&ptgt->tgt_mutex);
7609 			} else {
7610 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7611 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7612 				    "fcp_handle_reportlun,1: state change"
				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7614 				mutex_exit(&ptgt->tgt_mutex);
7615 				mutex_exit(&pptr->port_mutex);
7616 				kmem_free(report_lun, len);
7617 				(void) fcp_call_finish_init(pptr, ptgt,
7618 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7619 				    icmd->ipkt_cause);
7620 				fcp_icmd_free(pptr, icmd);
7621 				return;
7622 			}
7623 		} else if (exists) {
7624 			/*
7625 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7626 			 * actually exists in REPORT_LUN response
7627 			 */
7628 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7629 				plun->lun_state &=
7630 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7631 			}
7632 			if (offline || plun->lun_num == 0) {
7633 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7634 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7635 					mutex_exit(&ptgt->tgt_mutex);
7636 					fcp_log(CE_NOTE, pptr->port_dip,
7637 					    "!Lun=%x for target=%x reappeared",
7638 					    plun->lun_num, ptgt->tgt_d_id);
7639 					mutex_enter(&ptgt->tgt_mutex);
7640 				}
7641 			}
7642 		}
7643 	}
7644 
7645 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7646 	mutex_exit(&ptgt->tgt_mutex);
7647 
7648 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7649 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7650 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7651 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7652 
7653 	/* scan each lun */
7654 	for (i = 0; i < nluns_claimed; i++) {
7655 		uchar_t	*lun_string;
7656 
7657 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7658 
7659 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7660 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7661 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7662 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7663 		    lun_string[0]);
7664 
7665 		switch (lun_string[0] & 0xC0) {
7666 		case FCP_LUN_ADDRESSING:
7667 		case FCP_PD_ADDRESSING:
7668 		case FCP_VOLUME_ADDRESSING:
7669 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7670 
			/* Skip LUNs that are masked by the blacklist. */
7672 			if (fcp_lun_blacklist != NULL) {
7673 				mutex_enter(&ptgt->tgt_mutex);
7674 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7675 				    lun_num) == TRUE) {
7676 					ptgt->tgt_lun_cnt--;
7677 					mutex_exit(&ptgt->tgt_mutex);
7678 					break;
7679 				}
7680 				mutex_exit(&ptgt->tgt_mutex);
7681 			}
7682 
7683 			/* see if this LUN is already allocated */
7684 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7685 				plun = fcp_alloc_lun(ptgt);
7686 				if (plun == NULL) {
7687 					fcp_log(CE_NOTE, pptr->port_dip,
7688 					    "!Lun allocation failed"
7689 					    " target=%x lun=%x",
7690 					    ptgt->tgt_d_id, lun_num);
7691 					break;
7692 				}
7693 			}
7694 
7695 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7696 			/* convert to LUN */
7697 			plun->lun_addr.ent_addr_0 =
7698 			    BE_16(*(uint16_t *)&(lun_string[0]));
7699 			plun->lun_addr.ent_addr_1 =
7700 			    BE_16(*(uint16_t *)&(lun_string[2]));
7701 			plun->lun_addr.ent_addr_2 =
7702 			    BE_16(*(uint16_t *)&(lun_string[4]));
7703 			plun->lun_addr.ent_addr_3 =
7704 			    BE_16(*(uint16_t *)&(lun_string[6]));
7705 
7706 			plun->lun_num = lun_num;
7707 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7708 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7709 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7710 
7711 			/* Retrieve the rscn count (if a valid one exists) */
7712 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7713 				rscn_count = ((fc_ulp_rscn_info_t *)
7714 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7715 				    ulp_rscn_count;
7716 			} else {
7717 				rscn_count = FC_INVALID_RSCN_COUNT;
7718 			}
7719 
7720 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7721 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7722 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7723 				mutex_enter(&pptr->port_mutex);
7724 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7725 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7726 					fcp_log(CE_NOTE, pptr->port_dip,
7727 					    "!failed to send INQUIRY"
7728 					    " target=%x lun=%x",
7729 					    ptgt->tgt_d_id, plun->lun_num);
7730 				} else {
7731 					FCP_TRACE(fcp_logq,
7732 					    pptr->port_instbuf, fcp_trace,
7733 					    FCP_BUF_LEVEL_5, 0,
7734 					    "fcp_handle_reportlun,2: state"
					    " change occurred for D_ID=0x%x",
7736 					    ptgt->tgt_d_id);
7737 				}
7738 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7739 				mutex_exit(&pptr->port_mutex);
7740 			} else {
7741 				continue;
7742 			}
7743 			break;
7744 
7745 		default:
7746 			fcp_log(CE_WARN, NULL,
7747 			    "!Unsupported LUN Addressing method %x "
7748 			    "in response to REPORT_LUN", lun_string[0]);
7749 			break;
7750 		}
7751 
		/*
		 * Each pass through this loop decrements tmp_cnt by one;
		 * since the loop runs once per LUN, tmp_cnt should never
		 * drop to or below zero here.
		 */
7757 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7758 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7759 	}
7760 
7761 	if (i == 0) {
7762 		fcp_log(CE_WARN, pptr->port_dip,
7763 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7764 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7765 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7766 	}
7767 
7768 	kmem_free(report_lun, len);
7769 	fcp_icmd_free(pptr, icmd);
7770 }
7771 
7772 
7773 /*
7774  * called internally to return a LUN given a target and a LUN number
7775  */
7776 static struct fcp_lun *
7777 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7778 {
7779 	struct fcp_lun	*plun;
7780 
7781 	mutex_enter(&ptgt->tgt_mutex);
7782 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7783 		if (plun->lun_num == lun_num) {
7784 			mutex_exit(&ptgt->tgt_mutex);
7785 			return (plun);
7786 		}
7787 	}
7788 	mutex_exit(&ptgt->tgt_mutex);
7789 
7790 	return (NULL);
7791 }
7792 
7793 
7794 /*
7795  * handle finishing one target for fcp_finish_init
7796  *
7797  * return true (non-zero) if we want finish_init to continue with the
7798  * next target
7799  *
7800  * called with the port mutex held
7801  */
7802 /*ARGSUSED*/
7803 static int
7804 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7805     int link_cnt, int tgt_cnt, int cause)
7806 {
7807 	int	rval = 1;
7808 	ASSERT(pptr != NULL);
7809 	ASSERT(ptgt != NULL);
7810 
7811 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7812 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7813 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7814 	    ptgt->tgt_state);
7815 
7816 	ASSERT(mutex_owned(&pptr->port_mutex));
7817 
7818 	if ((pptr->port_link_cnt != link_cnt) ||
7819 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7820 		/*
7821 		 * oh oh -- another link reset or target change
7822 		 * must have occurred while we are in here
7823 		 */
7824 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7825 
7826 		return (0);
7827 	} else {
7828 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7829 	}
7830 
7831 	mutex_enter(&ptgt->tgt_mutex);
7832 
7833 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7834 		/*
7835 		 * tgt is not offline -- is it marked (i.e. needs
7836 		 * to be offlined) ??
7837 		 */
7838 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7839 			/*
7840 			 * this target not offline *and*
7841 			 * marked
7842 			 */
7843 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7844 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7845 			    tgt_cnt, 0, 0);
7846 		} else {
7847 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7848 
7849 			/* create the LUNs */
7850 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7851 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7852 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7853 				    cause);
7854 				ptgt->tgt_device_created = 1;
7855 			} else {
7856 				fcp_update_tgt_state(ptgt, FCP_RESET,
7857 				    FCP_LUN_BUSY);
7858 			}
7859 		}
7860 	}
7861 
7862 	mutex_exit(&ptgt->tgt_mutex);
7863 
7864 	return (rval);
7865 }
7866 
7867 
7868 /*
7869  * this routine is called to finish port initialization
7870  *
7871  * Each port has a "temp" counter -- when a state change happens (e.g.
7872  * port online), the temp count is set to the number of devices in the map.
7873  * Then, as each device gets "discovered", the temp counter is decremented
7874  * by one.  When this count reaches zero we know that all of the devices
7875  * in the map have been discovered (or an error has occurred), so we can
7876  * then finish initialization -- which is done by this routine (well, this
 * and fcp_finish_tgt())
7878  *
7879  * acquires and releases the global mutex
7880  *
7881  * called with the port mutex owned
7882  */
7883 static void
7884 fcp_finish_init(struct fcp_port *pptr)
7885 {
7886 #ifdef	DEBUG
7887 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
7888 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
7889 	    FCP_STACK_DEPTH);
7890 #endif /* DEBUG */
7891 
7892 	ASSERT(mutex_owned(&pptr->port_mutex));
7893 
7894 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7895 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
7896 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
7897 
7898 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
7899 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
7900 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
7901 		pptr->port_state &= ~FCP_STATE_ONLINING;
7902 		pptr->port_state |= FCP_STATE_ONLINE;
7903 	}
7904 
7905 	/* Wake up threads waiting on config done */
7906 	cv_broadcast(&pptr->port_config_cv);
7907 }
7908 
7909 
7910 /*
7911  * called from fcp_finish_init to create the LUNs for a target
7912  *
7913  * called with the port mutex owned
7914  */
7915 static void
7916 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
7917 {
7918 	struct fcp_lun	*plun;
7919 	struct fcp_port	*pptr;
7920 	child_info_t		*cip = NULL;
7921 
7922 	ASSERT(ptgt != NULL);
7923 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
7924 
7925 	pptr = ptgt->tgt_port;
7926 
7927 	ASSERT(pptr != NULL);
7928 
7929 	/* scan all LUNs for this target */
7930 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7931 		if (plun->lun_state & FCP_LUN_OFFLINE) {
7932 			continue;
7933 		}
7934 
7935 		if (plun->lun_state & FCP_LUN_MARK) {
7936 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7937 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7938 			    "fcp_create_luns: offlining marked LUN!");
7939 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
7940 			continue;
7941 		}
7942 
7943 		plun->lun_state &= ~FCP_LUN_BUSY;
7944 
		/*
		 * There are conditions in which the FCP_LUN_INIT flag is
		 * cleared but we still have a valid plun->lun_cip.  To
		 * cover that case, clear the path busy state (CLEAR_BUSY)
		 * whenever lun_cip is valid.
		 */
7950 		if (plun->lun_mpxio && plun->lun_cip &&
7951 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
7952 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
7953 		    0, 0))) {
7954 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7955 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7956 			    "fcp_create_luns: enable lun %p failed!",
7957 			    plun);
7958 		}
7959 
7960 		if (plun->lun_state & FCP_LUN_INIT &&
7961 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
7962 			continue;
7963 		}
7964 
7965 		if (cause == FCP_CAUSE_USER_CREATE) {
7966 			continue;
7967 		}
7968 
7969 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7970 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
7971 		    "create_luns: passing ONLINE elem to HP thread");
7972 
		/*
		 * If the lun has changed, prepare to offline the old path.
		 * Do not offline the old path right now, since it may
		 * still be open.
		 */
7978 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
7979 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
7980 		}
7981 
7982 		/* pass an ONLINE element to the hotplug thread */
7983 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7984 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
7985 
			/*
			 * We cannot attach synchronously (i.e. pass
			 * NDI_ONLINE_ATTACH) here, as we might be
			 * running in an interrupt or callback
			 * thread.
			 */
7992 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7993 			    link_cnt, tgt_cnt, 0, 0)) {
7994 				fcp_log(CE_CONT, pptr->port_dip,
7995 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
7996 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
7997 			}
7998 		}
7999 	}
8000 }
8001 
8002 
8003 /*
8004  * function to online/offline devices
8005  */
8006 static int
8007 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8008     int online, int lcount, int tcount, int flags)
8009 {
8010 	int			rval = NDI_FAILURE;
8011 	int			circ;
8012 	child_info_t		*ccip;
8013 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8014 	int			is_mpxio = pptr->port_mpxio;
8015 	dev_info_t		*cdip, *pdip;
8016 	char			*devname;
8017 
8018 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8019 		/*
8020 		 * When this event gets serviced, lun_cip and lun_mpxio
		 * have changed, so the event must be discarded now.
8022 		 */
8023 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8024 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8025 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8026 		return (rval);
8027 	}
8028 
8029 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8030 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8031 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8032 	    "flags=%x mpxio=%x\n",
8033 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8034 	    plun->lun_mpxio);
8035 
8036 	/*
8037 	 * lun_mpxio needs checking here because we can end up in a race
8038 	 * condition where this task has been dispatched while lun_mpxio is
8039 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8040 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8041 	 * the flag. We rely on the serialization of the tasks here. We return
8042 	 * NDI_SUCCESS so any callers continue without reporting spurious
	 * errors, and still think we're an MPXIO LUN.
8044 	 */
8045 
8046 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8047 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8048 		if (plun->lun_mpxio) {
8049 			rval = fcp_update_mpxio_path(plun, cip, online);
8050 		} else {
8051 			rval = NDI_SUCCESS;
8052 		}
8053 		return (rval);
8054 	}
8055 
8056 	/*
8057 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8058 	 * executing devfs_clean() if parent lock is held.
8059 	 */
8060 	ASSERT(!servicing_interrupt());
8061 	if (online == FCP_OFFLINE) {
8062 		if (plun->lun_mpxio == 0) {
8063 			if (plun->lun_cip == cip) {
8064 				cdip = DIP(plun->lun_cip);
8065 			} else {
8066 				cdip = DIP(cip);
8067 			}
8068 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8069 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8070 		} else if ((plun->lun_cip != cip) && cip) {
			/*
			 * This means a DTYPE/GUID change; get the dip of
			 * the old cip instead of the current lun_cip.
			 */
8075 			cdip = mdi_pi_get_client(PIP(cip));
8076 		}
8077 		if (cdip) {
8078 			if (i_ddi_devi_attached(cdip)) {
8079 				pdip = ddi_get_parent(cdip);
8080 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8081 				ndi_devi_enter(pdip, &circ);
8082 				(void) ddi_deviname(cdip, devname);
8083 				ndi_devi_exit(pdip, circ);
8084 				/*
8085 				 * Release parent lock before calling
8086 				 * devfs_clean().
8087 				 */
8088 				rval = devfs_clean(pdip, devname + 1,
8089 				    DV_CLEAN_FORCE);
8090 				kmem_free(devname, MAXNAMELEN + 1);
8091 				/*
8092 				 * Return if devfs_clean() fails for
8093 				 * non-MPXIO case.
8094 				 * For MPXIO case, another path could be
8095 				 * offlined.
8096 				 */
8097 				if (rval && plun->lun_mpxio == 0) {
8098 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8099 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8100 					    "fcp_trigger_lun: devfs_clean "
8101 					    "failed rval=%x  dip=%p",
8102 					    rval, pdip);
8103 					return (NDI_FAILURE);
8104 				}
8105 			}
8106 		}
8107 	}
8108 
8109 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8110 		return (NDI_FAILURE);
8111 	}
8112 
8113 	if (is_mpxio) {
8114 		mdi_devi_enter(pptr->port_dip, &circ);
8115 	} else {
8116 		ndi_devi_enter(pptr->port_dip, &circ);
8117 	}
8118 
8119 	mutex_enter(&pptr->port_mutex);
8120 	mutex_enter(&plun->lun_mutex);
8121 
8122 	if (online == FCP_ONLINE) {
8123 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8124 		if (ccip == NULL) {
8125 			goto fail;
8126 		}
8127 	} else {
8128 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8129 			goto fail;
8130 		}
8131 		ccip = cip;
8132 	}
8133 
8134 	if (online == FCP_ONLINE) {
8135 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8136 		    &circ);
8137 		fc_ulp_log_device_event(pptr->port_fp_handle,
8138 		    FC_ULP_DEVICE_ONLINE);
8139 	} else {
8140 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8141 		    &circ);
8142 		fc_ulp_log_device_event(pptr->port_fp_handle,
8143 		    FC_ULP_DEVICE_OFFLINE);
8144 	}
8145 
8146 fail:	mutex_exit(&plun->lun_mutex);
8147 	mutex_exit(&pptr->port_mutex);
8148 
8149 	if (is_mpxio) {
8150 		mdi_devi_exit(pptr->port_dip, circ);
8151 	} else {
8152 		ndi_devi_exit(pptr->port_dip, circ);
8153 	}
8154 
8155 	fc_ulp_idle_port(pptr->port_fp_handle);
8156 
8157 	return (rval);
8158 }
8159 
8160 
8161 /*
8162  * take a target offline by taking all of its LUNs offline
8163  */
8164 /*ARGSUSED*/
8165 static int
8166 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8167     int link_cnt, int tgt_cnt, int nowait, int flags)
8168 {
8169 	struct fcp_tgt_elem	*elem;
8170 
8171 	ASSERT(mutex_owned(&pptr->port_mutex));
8172 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8173 
8174 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8175 
8176 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8177 	    ptgt->tgt_change_cnt)) {
8178 		mutex_exit(&ptgt->tgt_mutex);
8179 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8180 		mutex_enter(&ptgt->tgt_mutex);
8181 
8182 		return (0);
8183 	}
8184 
8185 	ptgt->tgt_pd_handle = NULL;
8186 	mutex_exit(&ptgt->tgt_mutex);
8187 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8188 	mutex_enter(&ptgt->tgt_mutex);
8189 
8190 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8191 
8192 	if (ptgt->tgt_tcap &&
8193 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8194 		elem->flags = flags;
8195 		elem->time = fcp_watchdog_time;
8196 		if (nowait == 0) {
8197 			elem->time += fcp_offline_delay;
8198 		}
8199 		elem->ptgt = ptgt;
8200 		elem->link_cnt = link_cnt;
8201 		elem->tgt_cnt = tgt_cnt;
8202 		elem->next = pptr->port_offline_tgts;
8203 		pptr->port_offline_tgts = elem;
8204 	} else {
8205 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8206 	}
8207 
8208 	return (1);
8209 }
8210 
8211 
8212 static void
8213 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8214     int link_cnt, int tgt_cnt, int flags)
8215 {
8216 	ASSERT(mutex_owned(&pptr->port_mutex));
8217 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8218 
8219 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8220 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8221 	ptgt->tgt_pd_handle = NULL;
8222 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8223 }
8224 
8225 
8226 static void
8227 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8228     int flags)
8229 {
8230 	struct	fcp_lun	*plun;
8231 
8232 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8233 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8234 
8235 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8236 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8237 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8238 		}
8239 	}
8240 }
8241 
8242 
8243 /*
8244  * take a LUN offline
8245  *
8246  * enters and leaves with the target mutex held, releasing it in the process
8247  *
8248  * allocates memory in non-sleep mode
8249  */
8250 static void
8251 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8252     int nowait, int flags)
8253 {
8254 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8255 	struct fcp_lun_elem	*elem;
8256 
8257 	ASSERT(plun != NULL);
8258 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8259 
8260 	if (nowait) {
8261 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8262 		return;
8263 	}
8264 
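	/*
	 * Defer the offline: queue an fcp_lun_elem on port_offline_luns so
	 * that the watchdog (fcp_scan_offline_luns) performs the offline
	 * once fcp_offline_delay has expired, unless a link or target
	 * state change makes the request stale first.  If the allocation
	 * fails, fall back to offlining the lun immediately.
	 */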
8265 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8266 		elem->flags = flags;
8267 		elem->time = fcp_watchdog_time;
8268 		if (nowait == 0) {
8269 			elem->time += fcp_offline_delay;
8270 		}
8271 		elem->plun = plun;
8272 		elem->link_cnt = link_cnt;
8273 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8274 		elem->next = pptr->port_offline_luns;
8275 		pptr->port_offline_luns = elem;
8276 	} else {
8277 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8278 	}
8279 }
8280 
8281 
8282 static void
8283 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8284 {
8285 	struct fcp_pkt	*head = NULL;
8286 
8287 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8288 
8289 	mutex_exit(&LUN_TGT->tgt_mutex);
8290 
8291 	head = fcp_scan_commands(plun);
8292 	if (head != NULL) {
8293 		fcp_abort_commands(head, LUN_PORT);
8294 	}
8295 
8296 	mutex_enter(&LUN_TGT->tgt_mutex);
8297 
8298 	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Notify MPxIO that the lun's busy condition is cleared
		 */
8302 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8303 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8304 		    0, 0)) {
8305 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8306 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8307 			    LUN_TGT->tgt_d_id, plun->lun_num);
8308 		}
		/*
		 * Notify MPxIO that the lun is now marked for offline
		 */
8312 		mutex_exit(&LUN_TGT->tgt_mutex);
8313 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8314 		mutex_enter(&LUN_TGT->tgt_mutex);
8315 	}
8316 }
8317 
8318 static void
8319 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8320     int flags)
8321 {
8322 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8323 
8324 	mutex_exit(&LUN_TGT->tgt_mutex);
8325 	fcp_update_offline_flags(plun);
8326 	mutex_enter(&LUN_TGT->tgt_mutex);
8327 
8328 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8329 
8330 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8331 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8332 	    "offline_lun: passing OFFLINE elem to HP thread");
8333 
8334 	if (plun->lun_cip) {
8335 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8336 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8337 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8338 		    LUN_TGT->tgt_trace);
8339 
8340 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8341 		    link_cnt, tgt_cnt, flags, 0)) {
8342 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8343 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8344 			    LUN_TGT->tgt_d_id, plun->lun_num);
8345 		}
8346 	}
8347 }
8348 
8349 static void
8350 fcp_scan_offline_luns(struct fcp_port *pptr)
8351 {
8352 	struct fcp_lun_elem	*elem;
8353 	struct fcp_lun_elem	*prev;
8354 	struct fcp_lun_elem	*next;
8355 
8356 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8357 
8358 	prev = NULL;
8359 	elem = pptr->port_offline_luns;
8360 	while (elem) {
8361 		next = elem->next;
8362 		if (elem->time <= fcp_watchdog_time) {
8363 			int			changed = 1;
8364 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8365 
8366 			mutex_enter(&ptgt->tgt_mutex);
8367 			if (pptr->port_link_cnt == elem->link_cnt &&
8368 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8369 				changed = 0;
8370 			}
8371 
8372 			if (!changed &&
			    !(elem->plun->lun_state & FCP_LUN_OFFLINE)) {
8374 				fcp_offline_lun_now(elem->plun,
8375 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8376 			}
8377 			mutex_exit(&ptgt->tgt_mutex);
8378 
8379 			kmem_free(elem, sizeof (*elem));
8380 
8381 			if (prev) {
8382 				prev->next = next;
8383 			} else {
8384 				pptr->port_offline_luns = next;
8385 			}
8386 		} else {
8387 			prev = elem;
8388 		}
8389 		elem = next;
8390 	}
8391 }
8392 
8393 
8394 static void
8395 fcp_scan_offline_tgts(struct fcp_port *pptr)
8396 {
8397 	struct fcp_tgt_elem	*elem;
8398 	struct fcp_tgt_elem	*prev;
8399 	struct fcp_tgt_elem	*next;
8400 
8401 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8402 
8403 	prev = NULL;
8404 	elem = pptr->port_offline_tgts;
8405 	while (elem) {
8406 		next = elem->next;
8407 		if (elem->time <= fcp_watchdog_time) {
8408 			int			changed = 1;
8409 			struct fcp_tgt	*ptgt = elem->ptgt;
8410 
8411 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8412 				changed = 0;
8413 			}
8414 
8415 			mutex_enter(&ptgt->tgt_mutex);
8416 			if (!changed && !(ptgt->tgt_state &
8417 			    FCP_TGT_OFFLINE)) {
8418 				fcp_offline_target_now(pptr,
8419 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8420 				    elem->flags);
8421 			}
8422 			mutex_exit(&ptgt->tgt_mutex);
8423 
8424 			kmem_free(elem, sizeof (*elem));
8425 
8426 			if (prev) {
8427 				prev->next = next;
8428 			} else {
8429 				pptr->port_offline_tgts = next;
8430 			}
8431 		} else {
8432 			prev = elem;
8433 		}
8434 		elem = next;
8435 	}
8436 }
8437 
8438 
8439 static void
8440 fcp_update_offline_flags(struct fcp_lun *plun)
8441 {
8442 	struct fcp_port	*pptr = LUN_PORT;
8443 	ASSERT(plun != NULL);
8444 
8445 	mutex_enter(&LUN_TGT->tgt_mutex);
8446 	plun->lun_state |= FCP_LUN_OFFLINE;
8447 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8448 
8449 	mutex_enter(&plun->lun_mutex);
8450 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8451 		dev_info_t *cdip = NULL;
8452 
8453 		mutex_exit(&LUN_TGT->tgt_mutex);
8454 
8455 		if (plun->lun_mpxio == 0) {
8456 			cdip = DIP(plun->lun_cip);
8457 		} else if (plun->lun_cip) {
8458 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8459 		}
8460 
8461 		mutex_exit(&plun->lun_mutex);
8462 		if (cdip) {
8463 			(void) ndi_event_retrieve_cookie(
8464 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8465 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8466 			(void) ndi_event_run_callbacks(
8467 			    pptr->port_ndi_event_hdl, cdip,
8468 			    fcp_remove_eid, NULL);
8469 		}
8470 	} else {
8471 		mutex_exit(&plun->lun_mutex);
8472 		mutex_exit(&LUN_TGT->tgt_mutex);
8473 	}
8474 }
8475 
8476 
8477 /*
8478  * Scan all of the command pkts for this port, moving pkts that
8479  * match our LUN onto our own list (headed by "head")
8480  */
8481 static struct fcp_pkt *
8482 fcp_scan_commands(struct fcp_lun *plun)
8483 {
8484 	struct fcp_port	*pptr = LUN_PORT;
8485 
8486 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8487 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8488 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8489 
8490 	struct fcp_pkt	*head = NULL;	/* head of our list */
8491 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8492 
8493 	int			cmds_found = 0;
8494 
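	/*
	 * port_pkt_head/port_pkt_tail is a singly linked list chained
	 * through cmd_next, so keep a trailing pointer (pcmd) in order to
	 * unlink matching commands and splice them onto the private list
	 * returned to the caller.
	 */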
8495 	mutex_enter(&pptr->port_pkt_mutex);
8496 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8497 		struct fcp_lun *tlun =
8498 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8499 
8500 		ncmd = cmd->cmd_next;	/* set next command */
8501 
		/*
		 * If this pkt is for a different LUN, has already been
		 * issued to the FCA, or is a polled (FLAG_NOINTR) command,
		 * skip it.
		 */
8506 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8507 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8508 			pcmd = cmd;
8509 			continue;
8510 		}
8511 		cmds_found++;
8512 		if (pcmd != NULL) {
8513 			ASSERT(pptr->port_pkt_head != cmd);
8514 			pcmd->cmd_next = cmd->cmd_next;
8515 		} else {
8516 			ASSERT(cmd == pptr->port_pkt_head);
8517 			pptr->port_pkt_head = cmd->cmd_next;
8518 		}
8519 
8520 		if (cmd == pptr->port_pkt_tail) {
8521 			pptr->port_pkt_tail = pcmd;
8522 			if (pcmd) {
8523 				pcmd->cmd_next = NULL;
8524 			}
8525 		}
8526 
8527 		if (head == NULL) {
8528 			head = tail = cmd;
8529 		} else {
8530 			ASSERT(tail != NULL);
8531 
8532 			tail->cmd_next = cmd;
8533 			tail = cmd;
8534 		}
8535 		cmd->cmd_next = NULL;
8536 	}
8537 	mutex_exit(&pptr->port_pkt_mutex);
8538 
8539 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8540 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8541 	    "scan commands: %d cmd(s) found", cmds_found);
8542 
8543 	return (head);
8544 }
8545 
8546 
8547 /*
8548  * Abort all the commands in the command queue
8549  */
8550 static void
8551 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8552 {
8553 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8554 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8555 
8556 	ASSERT(mutex_owned(&pptr->port_mutex));
8557 
	/* scan through the pkts and invalidate them */
8559 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8560 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8561 
8562 		ncmd = cmd->cmd_next;
8563 		ASSERT(pkt != NULL);
8564 
		/*
		 * The lun is about to be marked offline.  Tell the target
		 * driver not to requeue or retry this command, since the
		 * device will be offlined shortly.
		 */
8570 		pkt->pkt_reason = CMD_DEV_GONE;
8571 		pkt->pkt_statistics = 0;
8572 		pkt->pkt_state = 0;
8573 
8574 		/* reset cmd flags/state */
8575 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8576 		cmd->cmd_state = FCP_PKT_IDLE;
8577 
8578 		/*
8579 		 * ensure we have a packet completion routine,
8580 		 * then call it.
8581 		 */
8582 		ASSERT(pkt->pkt_comp != NULL);
8583 
8584 		mutex_exit(&pptr->port_mutex);
8585 		fcp_post_callback(cmd);
8586 		mutex_enter(&pptr->port_mutex);
8587 	}
8588 }
8589 
8590 
8591 /*
8592  * the pkt_comp callback for command packets
8593  */
8594 static void
8595 fcp_cmd_callback(fc_packet_t *fpkt)
8596 {
8597 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8598 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8599 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8600 
8601 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8602 
8603 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8604 		cmn_err(CE_PANIC, "Packet already completed %p",
8605 		    (void *)cmd);
8606 	}
8607 
	/*
	 * The watch thread will free the packet; ignore it here.
	 */
8611 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8612 		fcp_log(CE_CONT, pptr->port_dip,
8613 		    "!FCP: Pkt completed while aborting\n");
8614 		return;
8615 	}
8616 	cmd->cmd_state = FCP_PKT_IDLE;
8617 
8618 	fcp_complete_pkt(fpkt);
8619 
8620 #ifdef	DEBUG
8621 	mutex_enter(&pptr->port_pkt_mutex);
8622 	pptr->port_npkts--;
8623 	mutex_exit(&pptr->port_pkt_mutex);
8624 #endif /* DEBUG */
8625 
8626 	fcp_post_callback(cmd);
8627 }
8628 
8629 
8630 static void
8631 fcp_complete_pkt(fc_packet_t *fpkt)
8632 {
8633 	int			error = 0;
8634 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8635 	    fpkt->pkt_ulp_private;
8636 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8637 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8638 	struct fcp_lun	*plun;
8639 	struct fcp_tgt	*ptgt;
8640 	struct fcp_rsp		*rsp;
8641 	struct scsi_address	save;
8642 
8643 #ifdef	DEBUG
8644 	save = pkt->pkt_address;
8645 #endif /* DEBUG */
8646 
8647 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8648 
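	/*
	 * The FCP response payload consists of a fixed fcp_rsp header,
	 * optionally followed by response information (when rsp_len_set
	 * is on) and then by sense data (when sense_len_set is on); the
	 * sense data offset computed below accounts for both.
	 */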
8649 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8650 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8651 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8652 			    sizeof (struct fcp_rsp));
8653 		}
8654 
8655 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8656 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8657 
8658 		pkt->pkt_resid = 0;
8659 
8660 		if (cmd->cmd_pkt->pkt_numcookies) {
8661 			pkt->pkt_state |= STATE_XFERRED_DATA;
8662 			if (fpkt->pkt_data_resid) {
8663 				error++;
8664 			}
8665 		}
8666 
8667 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8668 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * If the command came back with a non-GOOD status
			 * but carries neither response data nor sense data,
			 * clear the transferred-data state and set the
			 * resid so that the command will be retried.
			 */
8675 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8676 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8677 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8678 				pkt->pkt_resid = cmd->cmd_dmacount;
8679 			}
8680 		}
8681 
8682 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8683 			return;
8684 		}
8685 
8686 		plun = ADDR2LUN(&pkt->pkt_address);
8687 		ptgt = plun->lun_tgt;
8688 		ASSERT(ptgt != NULL);
8689 
8690 		/*
8691 		 * Update the transfer resid, if appropriate
8692 		 */
8693 		if (rsp->fcp_u.fcp_status.resid_over ||
8694 		    rsp->fcp_u.fcp_status.resid_under) {
8695 			pkt->pkt_resid = rsp->fcp_resid;
8696 		}
8697 
8698 		/*
8699 		 * First see if we got a FCP protocol error.
8700 		 */
8701 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8702 			struct fcp_rsp_info	*bep;
8703 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8704 			    sizeof (struct fcp_rsp));
8705 
8706 			if (fcp_validate_fcp_response(rsp, pptr) !=
8707 			    FC_SUCCESS) {
8708 				pkt->pkt_reason = CMD_CMPLT;
8709 				*(pkt->pkt_scbp) = STATUS_CHECK;
8710 
8711 				fcp_log(CE_WARN, pptr->port_dip,
8712 				    "!SCSI command to d_id=0x%x lun=0x%x"
8713 				    " failed, Bad FCP response values:"
8714 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8715 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8716 				    ptgt->tgt_d_id, plun->lun_num,
8717 				    rsp->reserved_0, rsp->reserved_1,
8718 				    rsp->fcp_u.fcp_status.reserved_0,
8719 				    rsp->fcp_u.fcp_status.reserved_1,
8720 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8721 
8722 				return;
8723 			}
8724 
8725 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8726 				FCP_CP_IN(fpkt->pkt_resp +
8727 				    sizeof (struct fcp_rsp), bep,
8728 				    fpkt->pkt_resp_acc,
8729 				    sizeof (struct fcp_rsp_info));
8730 			}
8731 
8732 			if (bep->rsp_code != FCP_NO_FAILURE) {
8733 				child_info_t	*cip;
8734 
8735 				pkt->pkt_reason = CMD_TRAN_ERR;
8736 
8737 				mutex_enter(&plun->lun_mutex);
8738 				cip = plun->lun_cip;
8739 				mutex_exit(&plun->lun_mutex);
8740 
8741 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8742 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8743 				    "FCP response error on cmd=%p"
8744 				    " target=0x%x, cip=%p", cmd,
8745 				    ptgt->tgt_d_id, cip);
8746 			}
8747 		}
8748 
8749 		/*
8750 		 * See if we got a SCSI error with sense data
8751 		 */
8752 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8753 			uchar_t				rqlen;
8754 			caddr_t				sense_from;
8755 			child_info_t			*cip;
8756 			timeout_id_t			tid;
8757 			struct scsi_arq_status		*arq;
8758 			struct scsi_extended_sense	*sense_to;
8759 
8760 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8761 			sense_to = &arq->sts_sensedata;
8762 
8763 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8764 			    sizeof (struct scsi_extended_sense));
8765 
8766 			sense_from = (caddr_t)fpkt->pkt_resp +
8767 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8768 
8769 			if (fcp_validate_fcp_response(rsp, pptr) !=
8770 			    FC_SUCCESS) {
8771 				pkt->pkt_reason = CMD_CMPLT;
8772 				*(pkt->pkt_scbp) = STATUS_CHECK;
8773 
8774 				fcp_log(CE_WARN, pptr->port_dip,
8775 				    "!SCSI command to d_id=0x%x lun=0x%x"
8776 				    " failed, Bad FCP response values:"
8777 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8778 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8779 				    ptgt->tgt_d_id, plun->lun_num,
8780 				    rsp->reserved_0, rsp->reserved_1,
8781 				    rsp->fcp_u.fcp_status.reserved_0,
8782 				    rsp->fcp_u.fcp_status.reserved_1,
8783 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8784 
8785 				return;
8786 			}
8787 
8788 			/*
8789 			 * copy in sense information
8790 			 */
8791 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8792 				FCP_CP_IN(sense_from, sense_to,
8793 				    fpkt->pkt_resp_acc, rqlen);
8794 			} else {
8795 				bcopy(sense_from, sense_to, rqlen);
8796 			}
8797 
8798 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8799 			    (FCP_SENSE_NO_LUN(sense_to))) {
8800 				mutex_enter(&ptgt->tgt_mutex);
8801 				if (ptgt->tgt_tid == NULL) {
8802 					/*
8803 					 * Kick off rediscovery
8804 					 */
8805 					tid = timeout(fcp_reconfigure_luns,
8806 					    (caddr_t)ptgt, drv_usectohz(1));
8807 
8808 					ptgt->tgt_tid = tid;
8809 					ptgt->tgt_state |= FCP_TGT_BUSY;
8810 				}
8811 				mutex_exit(&ptgt->tgt_mutex);
8812 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8813 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8814 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8815 					    "!FCP: Report Lun Has Changed"
8816 					    " target=%x", ptgt->tgt_d_id);
8817 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8818 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8819 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8820 					    "!FCP: LU Not Supported"
8821 					    " target=%x", ptgt->tgt_d_id);
8822 				}
8823 			}
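			/*
			 * The sense bytes arrived inline in the FCP
			 * response, so synthesize an auto-request-sense
			 * result: fill in the scsi_arq_status as though a
			 * REQUEST SENSE had already completed successfully,
			 * letting the target driver consume the sense data
			 * directly.
			 */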
8824 			ASSERT(pkt->pkt_scbp != NULL);
8825 
8826 			pkt->pkt_state |= STATE_ARQ_DONE;
8827 
8828 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8829 
8830 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8831 			arq->sts_rqpkt_reason = 0;
8832 			arq->sts_rqpkt_statistics = 0;
8833 
8834 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8835 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8836 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8837 			    STATE_XFERRED_DATA;
8838 
8839 			mutex_enter(&plun->lun_mutex);
8840 			cip = plun->lun_cip;
8841 			mutex_exit(&plun->lun_mutex);
8842 
8843 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8844 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8845 			    "SCSI Check condition on cmd=%p target=0x%x"
8846 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8847 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8848 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
8849 			    rsp->fcp_u.fcp_status.scsi_status,
8850 			    sense_to->es_key, sense_to->es_add_code,
8851 			    sense_to->es_qual_code);
8852 		}
8853 	} else {
8854 		plun = ADDR2LUN(&pkt->pkt_address);
8855 		ptgt = plun->lun_tgt;
8856 		ASSERT(ptgt != NULL);
8857 
8858 		/*
8859 		 * Work harder to translate errors into target driver
8860 		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason
		 * exhaustively; they resort to using the big hammer most
		 * often, which may not get fixed in the lifetime of this
		 * driver.
8864 		 */
8865 		pkt->pkt_state = 0;
8866 		pkt->pkt_statistics = 0;
8867 
8868 		switch (fpkt->pkt_state) {
8869 		case FC_PKT_TRAN_ERROR:
8870 			switch (fpkt->pkt_reason) {
8871 			case FC_REASON_OVERRUN:
8872 				pkt->pkt_reason = CMD_CMD_OVR;
8873 				pkt->pkt_statistics |= STAT_ABORTED;
8874 				break;
8875 
8876 			case FC_REASON_XCHG_BSY: {
8877 				caddr_t ptr;
8878 
8879 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8880 
8881 				ptr = (caddr_t)pkt->pkt_scbp;
8882 				if (ptr) {
8883 					*ptr = STATUS_BUSY;
8884 				}
8885 				break;
8886 			}
8887 
8888 			case FC_REASON_ABORTED:
8889 				pkt->pkt_reason = CMD_TRAN_ERR;
8890 				pkt->pkt_statistics |= STAT_ABORTED;
8891 				break;
8892 
8893 			case FC_REASON_ABORT_FAILED:
8894 				pkt->pkt_reason = CMD_ABORT_FAIL;
8895 				break;
8896 
8897 			case FC_REASON_NO_SEQ_INIT:
8898 			case FC_REASON_CRC_ERROR:
8899 				pkt->pkt_reason = CMD_TRAN_ERR;
8900 				pkt->pkt_statistics |= STAT_ABORTED;
8901 				break;
8902 			default:
8903 				pkt->pkt_reason = CMD_TRAN_ERR;
8904 				break;
8905 			}
8906 			break;
8907 
8908 		case FC_PKT_PORT_OFFLINE: {
8909 			dev_info_t	*cdip = NULL;
8910 			caddr_t		ptr;
8911 
8912 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
8913 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8914 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
8915 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
8916 				    ptgt->tgt_d_id);
8917 			}
8918 
8919 			mutex_enter(&plun->lun_mutex);
8920 			if (plun->lun_mpxio == 0) {
8921 				cdip = DIP(plun->lun_cip);
8922 			} else if (plun->lun_cip) {
8923 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8924 			}
8925 
8926 			mutex_exit(&plun->lun_mutex);
8927 
8928 			if (cdip) {
8929 				(void) ndi_event_retrieve_cookie(
8930 				    pptr->port_ndi_event_hdl, cdip,
8931 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
8932 				    NDI_EVENT_NOPASS);
8933 				(void) ndi_event_run_callbacks(
8934 				    pptr->port_ndi_event_hdl, cdip,
8935 				    fcp_remove_eid, NULL);
8936 			}
8937 
			/*
			 * If the link goes off-line for a LIP, this will
			 * cause an error in the ST, SG, and SGEN drivers.
			 * By setting BUSY we give those drivers the chance
			 * to retry before they give up on the job.  ST
			 * remembers how many times it has retried.
			 */
8946 
8947 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
8948 			    (plun->lun_type == DTYPE_CHANGER)) {
8949 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8950 				ptr = (caddr_t)pkt->pkt_scbp;
8951 				if (ptr) {
8952 					*ptr = STATUS_BUSY;
8953 				}
8954 			} else {
8955 				pkt->pkt_reason = CMD_TRAN_ERR;
8956 				pkt->pkt_statistics |= STAT_BUS_RESET;
8957 			}
8958 			break;
8959 		}
8960 
8961 		case FC_PKT_TRAN_BSY:
8962 			/*
8963 			 * Use the ssd Qfull handling here.
8964 			 */
8965 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
8966 			pkt->pkt_state = STATE_GOT_BUS;
8967 			break;
8968 
8969 		case FC_PKT_TIMEOUT:
8970 			pkt->pkt_reason = CMD_TIMEOUT;
8971 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
8972 				pkt->pkt_statistics |= STAT_TIMEOUT;
8973 			} else {
8974 				pkt->pkt_statistics |= STAT_ABORTED;
8975 			}
8976 			break;
8977 
8978 		case FC_PKT_LOCAL_RJT:
8979 			switch (fpkt->pkt_reason) {
8980 			case FC_REASON_OFFLINE: {
8981 				dev_info_t	*cdip = NULL;
8982 
8983 				mutex_enter(&plun->lun_mutex);
8984 				if (plun->lun_mpxio == 0) {
8985 					cdip = DIP(plun->lun_cip);
8986 				} else if (plun->lun_cip) {
8987 					cdip = mdi_pi_get_client(
8988 					    PIP(plun->lun_cip));
8989 				}
8990 				mutex_exit(&plun->lun_mutex);
8991 
8992 				if (cdip) {
8993 					(void) ndi_event_retrieve_cookie(
8994 					    pptr->port_ndi_event_hdl, cdip,
8995 					    FCAL_REMOVE_EVENT,
8996 					    &fcp_remove_eid,
8997 					    NDI_EVENT_NOPASS);
8998 					(void) ndi_event_run_callbacks(
8999 					    pptr->port_ndi_event_hdl,
9000 					    cdip, fcp_remove_eid, NULL);
9001 				}
9002 
9003 				pkt->pkt_reason = CMD_TRAN_ERR;
9004 				pkt->pkt_statistics |= STAT_BUS_RESET;
9005 
9006 				break;
9007 			}
9008 
9009 			case FC_REASON_NOMEM:
9010 			case FC_REASON_QFULL: {
9011 				caddr_t ptr;
9012 
9013 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9014 				ptr = (caddr_t)pkt->pkt_scbp;
9015 				if (ptr) {
9016 					*ptr = STATUS_BUSY;
9017 				}
9018 				break;
9019 			}
9020 
9021 			case FC_REASON_DMA_ERROR:
9022 				pkt->pkt_reason = CMD_DMA_DERR;
9023 				pkt->pkt_statistics |= STAT_ABORTED;
9024 				break;
9025 
9026 			case FC_REASON_CRC_ERROR:
9027 			case FC_REASON_UNDERRUN: {
9028 				uchar_t		status;
9029 				/*
9030 				 * Workaround for BugID 4240945: the IB on
9031 				 * the A5K doesn't set the underrun bit in
9032 				 * the fcp status when it transfers less
9033 				 * than the requested amount of data. Work
9034 				 * around the ses problem to keep luxadm
9035 				 * happy until the IB firmware is fixed.
9036 				 */
9037 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9038 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9039 					    fpkt->pkt_resp_acc,
9040 					    sizeof (struct fcp_rsp));
9041 				}
9042 				status = rsp->fcp_u.fcp_status.scsi_status;
9043 				if (((plun->lun_type & DTYPE_MASK) ==
9044 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9045 					pkt->pkt_reason = CMD_CMPLT;
9046 					*pkt->pkt_scbp = status;
9047 					pkt->pkt_resid = 0;
9048 				} else {
9049 					pkt->pkt_reason = CMD_TRAN_ERR;
9050 					pkt->pkt_statistics |= STAT_ABORTED;
9051 				}
9052 				break;
9053 			}
9054 
9055 			case FC_REASON_NO_CONNECTION:
9056 			case FC_REASON_UNSUPPORTED:
9057 			case FC_REASON_ILLEGAL_REQ:
9058 			case FC_REASON_BAD_SID:
9059 			case FC_REASON_DIAG_BUSY:
9060 			case FC_REASON_FCAL_OPN_FAIL:
9061 			case FC_REASON_BAD_XID:
9062 			default:
9063 				pkt->pkt_reason = CMD_TRAN_ERR;
9064 				pkt->pkt_statistics |= STAT_ABORTED;
9065 				break;
9066 
9067 			}
9068 			break;
9069 
9070 		case FC_PKT_NPORT_RJT:
9071 		case FC_PKT_FABRIC_RJT:
9072 		case FC_PKT_NPORT_BSY:
9073 		case FC_PKT_FABRIC_BSY:
9074 		default:
9075 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9076 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9077 			    "FC Status 0x%x, reason 0x%x",
9078 			    fpkt->pkt_state, fpkt->pkt_reason);
9079 			pkt->pkt_reason = CMD_TRAN_ERR;
9080 			pkt->pkt_statistics |= STAT_ABORTED;
9081 			break;
9082 		}
9083 
9084 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9085 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9086 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9087 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9088 		    fpkt->pkt_reason);
9089 	}
9090 
9091 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9092 }
9093 
9094 
9095 static int
9096 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9097 {
9098 	if (rsp->reserved_0 || rsp->reserved_1 ||
9099 	    rsp->fcp_u.fcp_status.reserved_0 ||
9100 	    rsp->fcp_u.fcp_status.reserved_1) {
9101 		/*
9102 		 * These reserved fields should ideally be zero. FCP-2 does say
9103 		 * that the recipient need not check that reserved fields are
9104 		 * zero. If they are not zero we don't make a fuss about it; we
9105 		 * just log it (in debug kernels to both the trace buffer and
9106 		 * the messages file, otherwise to the trace buffer only) and
9107 		 * move on.
9108 		 *
9109 		 * Non-zero reserved fields were seen with minnows.
9110 		 *
9111 		 * qlc takes care of some of this; we cannot assume all FCAs do.
9112 		 */
9113 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9114 		    FCP_BUF_LEVEL_5, 0,
9115 		    "Got fcp response packet with non-zero reserved fields "
9116 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9117 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9118 		    rsp->reserved_0, rsp->reserved_1,
9119 		    rsp->fcp_u.fcp_status.reserved_0,
9120 		    rsp->fcp_u.fcp_status.reserved_1);
9121 	}
9122 
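	/*
	 * The FCP_RSP IU is at most FCP_MAX_RSP_IU_SIZE bytes: the fixed
	 * fcp_rsp header followed by the optional response data and the
	 * optional sense data.  Reject any response whose advertised
	 * lengths cannot fit in that space.
	 */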
9123 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9124 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9125 		return (FC_FAILURE);
9126 	}
9127 
9128 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9129 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9130 	    sizeof (struct fcp_rsp))) {
9131 		return (FC_FAILURE);
9132 	}
9133 
9134 	return (FC_SUCCESS);
9135 }
9136 
9137 
9138 /*
9139  * This is called when there is a change in the device state. The case we're
9140  * handling here is: if the d_id does not match, offline this tgt and online
9141  * a new tgt with the new d_id.  Called from fcp_handle_devices() with
9142  * port_mutex held.
9143  */
9144 static int
9145 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9146     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9147 {
9148 	ASSERT(mutex_owned(&pptr->port_mutex));
9149 
9150 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9151 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9152 	    "Starting fcp_device_changed...");
9153 
9154 	/*
9155 	 * The two cases in which this routine is called are when the
9156 	 * target changes its d_id or its hard address.
9157 	 */
9158 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9159 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9160 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9161 
9162 		/* offline this target */
9163 		mutex_enter(&ptgt->tgt_mutex);
9164 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9165 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9166 			    0, 1, NDI_DEVI_REMOVE);
9167 		}
9168 		mutex_exit(&ptgt->tgt_mutex);
9169 
9170 		fcp_log(CE_NOTE, pptr->port_dip,
9171 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9172 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9173 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9174 		    map_entry->map_hard_addr.hard_addr);
9175 	}
9176 
9177 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9178 	    link_cnt, tgt_cnt, cause));
9179 }
9180 
9181 /*
9182  *     Function: fcp_alloc_lun
9183  *
9184  *  Description: Creates a new lun structure and adds it to the list
9185  *		 of luns of the target.
9186  *
9187  *     Argument: ptgt		Target the lun will belong to.
9188  *
9189  * Return Value: NULL		Failed
9190  *		 Not NULL	Succeeded
9191  *
9192  *	Context: Kernel context
9193  */
9194 static struct fcp_lun *
9195 fcp_alloc_lun(struct fcp_tgt *ptgt)
9196 {
9197 	struct fcp_lun *plun;
9198 
9199 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9200 	if (plun != NULL) {
9201 		/*
9202 		 * Initialize the mutex before putting in the target list
9203 		 * especially before releasing the target mutex.
9204 		 */
9205 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9206 		plun->lun_tgt = ptgt;
9207 
9208 		mutex_enter(&ptgt->tgt_mutex);
9209 		plun->lun_next = ptgt->tgt_lun;
9210 		ptgt->tgt_lun = plun;
9211 		plun->lun_old_guid = NULL;
9212 		plun->lun_old_guid_size = 0;
9213 		mutex_exit(&ptgt->tgt_mutex);
9214 	}
9215 
9216 	return (plun);
9217 }
9218 
9219 /*
9220  *     Function: fcp_dealloc_lun
9221  *
9222  *  Description: Frees the LUN structure passed by the caller.
9223  *
9224  *     Argument: plun		LUN structure to free.
9225  *
9226  * Return Value: None
9227  *
9228  *	Context: Kernel context.
9229  */
9230 static void
9231 fcp_dealloc_lun(struct fcp_lun *plun)
9232 {
9233 	mutex_enter(&plun->lun_mutex);
9234 	if (plun->lun_cip) {
9235 		fcp_remove_child(plun);
9236 	}
9237 	mutex_exit(&plun->lun_mutex);
9238 
9239 	mutex_destroy(&plun->lun_mutex);
9240 	if (plun->lun_guid) {
9241 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9242 	}
9243 	if (plun->lun_old_guid) {
9244 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9245 	}
9246 	kmem_free(plun, sizeof (*plun));
9247 }
9248 
9249 /*
9250  *     Function: fcp_alloc_tgt
9251  *
9252  *  Description: Creates a new target structure and adds it to the port
9253  *		 hash list.
9254  *
9255  *     Argument: pptr		fcp port structure
9256  *		 *map_entry	entry describing the target to create
9257  *		 link_cnt	Link state change counter
9258  *
9259  * Return Value: NULL		Failed
9260  *		 Not NULL	Succeeded
9261  *
9262  *	Context: Kernel context.
9263  */
9264 static struct fcp_tgt *
9265 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9266 {
9267 	int			hash;
9268 	uchar_t			*wwn;
9269 	struct fcp_tgt	*ptgt;
9270 
9271 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9272 	if (ptgt != NULL) {
9273 		mutex_enter(&pptr->port_mutex);
9274 		if (link_cnt != pptr->port_link_cnt) {
9275 			/*
9276 			 * oh oh -- another link reset
9277 			 * in progress -- give up
9278 			 */
9279 			mutex_exit(&pptr->port_mutex);
9280 			kmem_free(ptgt, sizeof (*ptgt));
9281 			ptgt = NULL;
9282 		} else {
9283 			/*
9284 			 * initialize the mutex before putting in the port
9285 			 * wwn list, especially before releasing the port
9286 			 * mutex.
9287 			 */
9288 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9289 
9290 			/* add new target entry to the port's hash list */
9291 			wwn = (uchar_t *)&map_entry->map_pwwn;
9292 			hash = FCP_HASH(wwn);
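			/*
			 * The bucket index is derived from the remote port
			 * WWN, so later lookups by WWN probe the same hash
			 * chain.
			 */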
9293 
9294 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9295 			pptr->port_tgt_hash_table[hash] = ptgt;
9296 
9297 			/* save cross-ptr */
9298 			ptgt->tgt_port = pptr;
9299 
9300 			ptgt->tgt_change_cnt = 1;
9301 
9302 			/* initialize the target manual_config_only flag */
9303 			if (fcp_enable_auto_configuration) {
9304 				ptgt->tgt_manual_config_only = 0;
9305 			} else {
9306 				ptgt->tgt_manual_config_only = 1;
9307 			}
9308 
9309 			mutex_exit(&pptr->port_mutex);
9310 		}
9311 	}
9312 
9313 	return (ptgt);
9314 }
9315 
9316 /*
9317  *     Function: fcp_dealloc_tgt
9318  *
9319  *  Description: Frees the target structure passed by the caller.
9320  *
9321  *     Argument: ptgt		Target structure to free.
9322  *
9323  * Return Value: None
9324  *
9325  *	Context: Kernel context.
9326  */
9327 static void
9328 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9329 {
9330 	mutex_destroy(&ptgt->tgt_mutex);
9331 	kmem_free(ptgt, sizeof (*ptgt));
9332 }
9333 
9334 
9335 /*
9336  * Handle STATUS_QFULL and STATUS_BUSY by performing a delayed retry.
9337  *
9338  *	Device discovery commands will not be retried forever as
9339  *	this would have repercussions on other devices that need to
9340  *	be submitted to the hotplug thread. After a quick glance
9341  *	at the SCSI-3 spec, it was found that the spec doesn't
9342  *	mandate a forever retry; rather, it recommends a delayed retry.
9343  *
9344  *	Since the Photon IB is single threaded, STATUS_BUSY is common
9345  *	in a 4+ initiator environment. Make sure the total time
9346  *	spent on retries (including the command timeout) does not
9347  *	exceed 60 seconds.
9348  */
9349 static void
9350 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9351 {
9352 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9353 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9354 
9355 	mutex_enter(&pptr->port_mutex);
9356 	mutex_enter(&ptgt->tgt_mutex);
9357 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9358 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9359 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9360 		    "fcp_queue_ipkt,1:state change occurred"
9361 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9362 		mutex_exit(&ptgt->tgt_mutex);
9363 		mutex_exit(&pptr->port_mutex);
9364 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9365 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9366 		fcp_icmd_free(pptr, icmd);
9367 		return;
9368 	}
9369 	mutex_exit(&ptgt->tgt_mutex);
9370 
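	/*
	 * Each re-queue pushes the restart time one more fcp_watchdog_time
	 * unit into the future, giving a simple, linearly increasing backoff
	 * between retries of this internal packet.
	 */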
9371 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9372 
9373 	if (pptr->port_ipkt_list != NULL) {
9374 		/* add pkt to front of doubly-linked list */
9375 		pptr->port_ipkt_list->ipkt_prev = icmd;
9376 		icmd->ipkt_next = pptr->port_ipkt_list;
9377 		pptr->port_ipkt_list = icmd;
9378 		icmd->ipkt_prev = NULL;
9379 	} else {
9380 		/* this is the first/only pkt on the list */
9381 		pptr->port_ipkt_list = icmd;
9382 		icmd->ipkt_next = NULL;
9383 		icmd->ipkt_prev = NULL;
9384 	}
9385 	mutex_exit(&pptr->port_mutex);
9386 }
9387 
9388 /*
9389  *     Function: fcp_transport
9390  *
9391  *  Description: This function submits the Fibre Channel packet to the transport
9392  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9393  *		 fails the submission, the treatment depends on the value of
9394  *		 the variable internal.
9395  *
9396  *     Argument: port_handle	fp/fctl port handle.
9397  *		 *fpkt		Packet to submit to the transport layer.
9398  *		 internal	Not zero when it's an internal packet.
9399  *
9400  * Return Value: FC_TRAN_BUSY
9401  *		 FC_STATEC_BUSY
9402  *		 FC_OFFLINE
9403  *		 FC_LOGINREQ
9404  *		 FC_DEVICE_BUSY
9405  *		 FC_SUCCESS
9406  */
9407 static int
9408 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9409 {
9410 	int	rval;
9411 
9412 	rval = fc_ulp_transport(port_handle, fpkt);
9413 	if (rval == FC_SUCCESS) {
9414 		return (rval);
9415 	}
9416 
9417 	/*
9418 	 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
9419 	 * a command. If the underlying modules see that there is a state
9420 	 * change, or that a port is OFFLINE, that means the state change
9421 	 * hasn't reached FCP yet, so re-queue the command for deferred
9422 	 * submission.
9423 	 */
9424 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9425 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9426 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9427 		/*
9428 		 * Defer packet re-submission. A hang is possible on internal
9429 		 * commands if the port driver returns FC_STATEC_BUSY forever, but
9430 		 * that shouldn't happen in a healthy environment. Limiting
9431 		 * re-transport for internal commands is probably a good idea.
9432 		 *
9433 		 * A race condition can happen when a port sees a barrage of
9434 		 * link transitions from offline to online. If fctl has
9435 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9436 		 * internal commands should be queued to do the discovery.
9437 		 * The race is this: an online comes in, FCP starts its
9438 		 * internal discovery, and the link goes offline. It is
9439 		 * possible that the statec_callback has not reached FCP yet
9440 		 * and FCP is carrying on with its internal discovery.
9441 		 * FC_STATEC_BUSY or FC_OFFLINE will then be the first
9442 		 * indication that the link has gone offline. At that point
9443 		 * FCP should drop all the internal commands and wait for the
9444 		 * statec_callback. That is facilitated by incrementing
9445 		 * port_link_cnt.
9446 		 *
9447 		 * For external commands, the (FC) pkt_timeout is decremented
9448 		 * by the queue delay added by our driver. Care is taken to
9449 		 * ensure it doesn't become zero (zero means no timeout). If
9450 		 * the time expires while the command is still in the driver
9451 		 * queue, the watch thread returns it to the original caller
9452 		 * indicating that the command has timed out.
9453 		 */
9454 		if (internal) {
9455 			char			*op;
9456 			struct fcp_ipkt	*icmd;
9457 
9458 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9459 			switch (icmd->ipkt_opcode) {
9460 			case SCMD_REPORT_LUN:
9461 				op = "REPORT LUN";
9462 				break;
9463 
9464 			case SCMD_INQUIRY:
9465 				op = "INQUIRY";
9466 				break;
9467 
9468 			case SCMD_INQUIRY_PAGE83:
9469 				op = "INQUIRY-83";
9470 				break;
9471 
9472 			default:
9473 				op = "Internal SCSI COMMAND";
9474 				break;
9475 			}
9476 
9477 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9478 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9479 				rval = FC_SUCCESS;
9480 			}
9481 		} else {
9482 			struct fcp_pkt *cmd;
9483 			struct fcp_port *pptr;
9484 
9485 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9486 			cmd->cmd_state = FCP_PKT_IDLE;
9487 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9488 
9489 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9490 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9491 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9492 				    "fcp_transport: xport busy for pkt %p",
9493 				    cmd->cmd_pkt);
9494 				rval = FC_TRAN_BUSY;
9495 			} else {
9496 				fcp_queue_pkt(pptr, cmd);
9497 				rval = FC_SUCCESS;
9498 			}
9499 		}
9500 	}
9501 
9502 	return (rval);
9503 }
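
/*
 * Illustrative sketch only (hypothetical code, not part of this driver): a
 * SCSA start routine dispatching an external command might interpret the
 * return value of fcp_transport() roughly as follows, where cmd is the
 * fcp_pkt being started:
 *
 *	int ret = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
 *
 *	if (ret == FC_SUCCESS) {
 *		return (TRAN_ACCEPT);
 *	} else if (ret == FC_TRAN_BUSY) {
 *		return (TRAN_BUSY);
 *	} else {
 *		return (TRAN_FATAL_ERROR);
 *	}
 *
 * FC_SUCCESS covers both the packet being sent and the packet being queued
 * for deferred retry; FC_TRAN_BUSY is only returned for FLAG_NOQUEUE
 * packets when the transport is busy.
 */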
9504 
9505 /*VARARGS3*/
9506 static void
9507 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9508 {
9509 	char		buf[256];
9510 	va_list		ap;
9511 
9512 	if (dip == NULL) {
9513 		dip = fcp_global_dip;
9514 	}
9515 
9516 	va_start(ap, fmt);
9517 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
9518 	va_end(ap);
9519 
9520 	scsi_log(dip, "fcp", level, buf);
9521 }
9522 
9523 /*
9524  * This function retries the NS registration of the FC4 type.
9525  * It assumes that port_mutex is held.
9526  * The function does nothing unless the topology is fabric or public loop,
9527  * so the topology has to be set before this function can be called.
9528  */
9529 static void
9530 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9531 {
9532 	int	rval;
9533 
9534 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9535 
9536 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9537 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9538 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9539 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9540 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9541 		}
9542 		return;
9543 	}
9544 	mutex_exit(&pptr->port_mutex);
9545 	rval = fcp_do_ns_registry(pptr, s_id);
9546 	mutex_enter(&pptr->port_mutex);
9547 
9548 	if (rval == 0) {
9549 		/* Registry successful. Reset flag */
9550 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9551 	}
9552 }
9553 
9554 /*
9555  * This function registers the ULP with the switch by calling transport i/f
9556  */
9557 static int
9558 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9559 {
9560 	fc_ns_cmd_t		ns_cmd;
9561 	ns_rfc_type_t		rfc;
9562 	uint32_t		types[8];
9563 
9564 	/*
9565 	 * Prepare the Name server structure to
9566 	 * register with the transport in case of
9567 	 * Fabric configuration.
9568 	 */
9569 	bzero(&rfc, sizeof (rfc));
9570 	bzero(types, sizeof (types));
9571 
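	/*
	 * The RFT_ID payload carries the FC-4 types as a 256-bit bitmap
	 * split into eight 32-bit words: FC4_TYPE_WORD_POS() yields the
	 * word index (type / 32) and FC4_TYPE_BIT_POS() the bit index
	 * (type % 32).  The statement below sets the single bit that
	 * announces SCSI-FCP support.
	 */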
9572 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9573 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9574 
9575 	rfc.rfc_port_id.port_id = s_id;
9576 	bcopy(types, rfc.rfc_types, sizeof (types));
9577 
9578 	ns_cmd.ns_flags = 0;
9579 	ns_cmd.ns_cmd = NS_RFT_ID;
9580 	ns_cmd.ns_req_len = sizeof (rfc);
9581 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9582 	ns_cmd.ns_resp_len = 0;
9583 	ns_cmd.ns_resp_payload = NULL;
9584 
9585 	/*
9586 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9587 	 */
9588 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9589 		fcp_log(CE_WARN, pptr->port_dip,
9590 		    "!ns_registry: failed name server registration");
9591 		return (1);
9592 	}
9593 
9594 	return (0);
9595 }
9596 
9597 /*
9598  *     Function: fcp_handle_port_attach
9599  *
9600  *  Description: This function is called from fcp_port_attach() to attach a
9601  *		 new port. This routine does the following:
9602  *
9603  *		1) Allocates an fcp_port structure and initializes it.
9604  *		2) Tries to register the new FC-4 (FCP) capability with the name
9605  *		   server.
9606  *		3) Kicks off the enumeration of the targets/luns visible
9607  *		   through this new port.  That is done by calling
9608  *		   fcp_statec_callback() if the port is online.
9609  *
9610  *     Argument: ulph		fp/fctl port handle.
9611  *		 *pinfo		Port information.
9612  *		 s_id		Port ID.
9613  *		 instance	Device instance number for the local port
9614  *				(returned by ddi_get_instance()).
9615  *
9616  * Return Value: DDI_SUCCESS
9617  *		 DDI_FAILURE
9618  *
9619  *	Context: User and Kernel context.
9620  */
9621 /*ARGSUSED*/
9622 int
9623 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9624     uint32_t s_id, int instance)
9625 {
9626 	int			res = DDI_FAILURE;
9627 	scsi_hba_tran_t		*tran;
9628 	int			mutex_initted = FALSE;
9629 	int			hba_attached = FALSE;
9630 	int			soft_state_linked = FALSE;
9631 	int			event_bind = FALSE;
9632 	struct fcp_port		*pptr;
9633 	fc_portmap_t		*tmp_list = NULL;
9634 	uint32_t		max_cnt, alloc_cnt;
9635 	uchar_t			*boot_wwn = NULL;
9636 	uint_t			nbytes;
9637 	int			manual_cfg;
9638 
9639 	/*
9640 	 * This port instance is attaching for the first time (or after
9641 	 * having been detached earlier).
9642 	 */
9643 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9644 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9645 
9646 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9647 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed; "
9648 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9649 		    instance);
9650 		return (res);
9651 	}
9652 
9653 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9654 		/* this shouldn't happen */
9655 		ddi_soft_state_free(fcp_softstate, instance);
9656 		cmn_err(CE_WARN, "fcp: bad soft state");
9657 		return (res);
9658 	}
9659 
9660 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9661 
9662 	/*
9663 	 * Make a copy of ulp_port_info as fctl allocates
9664 	 * a temp struct.
9665 	 */
9666 	(void) fcp_cp_pinfo(pptr, pinfo);
9667 
9668 	/*
9669 	 * Check for the manual_configuration_only property.
9670 	 * Enable manual configuration if the property is
9671 	 * set to 1, otherwise disable manual configuration.
9672 	 */
9673 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9674 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9675 	    MANUAL_CFG_ONLY,
9676 	    -1)) != -1) {
9677 		if (manual_cfg == 1) {
9678 			char	*pathname;
9679 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9680 			(void) ddi_pathname(pptr->port_dip, pathname);
9681 			cmn_err(CE_NOTE,
9682 			    "%s (%s%d) %s is enabled via %s.conf.",
9683 			    pathname,
9684 			    ddi_driver_name(pptr->port_dip),
9685 			    ddi_get_instance(pptr->port_dip),
9686 			    MANUAL_CFG_ONLY,
9687 			    ddi_driver_name(pptr->port_dip));
9688 			fcp_enable_auto_configuration = 0;
9689 			kmem_free(pathname, MAXPATHLEN);
9690 		}
9691 	}
9692 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9693 	pptr->port_link_cnt = 1;
9694 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9695 	pptr->port_id = s_id;
9696 	pptr->port_instance = instance;
9697 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state))
9698 	pptr->port_state = FCP_STATE_INIT;
9699 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state))
9700 
9701 	pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen *
9702 	    sizeof (ddi_dma_cookie_t));
9703 
9704 	/*
9705 	 * The two mutexes of fcp_port are initialized.	 The variable
9706 	 * mutex_initted is incremented to remember that fact.	That variable
9707 	 * is checked when the routine fails and the mutexes have to be
9708 	 * destroyed.
9709 	 */
9710 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9711 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9712 	mutex_initted++;
9713 
9714 	/*
9715 	 * The SCSI tran structure is allocated and initialized now.
9716 	 */
9717 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9718 		fcp_log(CE_WARN, pptr->port_dip,
9719 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9720 		goto fail;
9721 	}
9722 
9723 	/* link in the transport structure then fill it in */
9724 	pptr->port_tran = tran;
9725 	tran->tran_hba_private		= pptr;
9726 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9727 	tran->tran_tgt_probe		= NULL;
9728 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9729 	tran->tran_start		= fcp_scsi_start;
9730 	tran->tran_reset		= fcp_scsi_reset;
9731 	tran->tran_abort		= fcp_scsi_abort;
9732 	tran->tran_getcap		= fcp_scsi_getcap;
9733 	tran->tran_setcap		= fcp_scsi_setcap;
9734 	tran->tran_init_pkt		= NULL;
9735 	tran->tran_destroy_pkt		= NULL;
9736 	tran->tran_dmafree		= NULL;
9737 	tran->tran_sync_pkt		= NULL;
9738 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9739 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9740 	tran->tran_get_name		= fcp_scsi_get_name;
9741 	tran->tran_clear_aca		= NULL;
9742 	tran->tran_clear_task_set	= NULL;
9743 	tran->tran_terminate_task	= NULL;
9744 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9745 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9746 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9747 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9748 	tran->tran_quiesce		= NULL;
9749 	tran->tran_unquiesce		= NULL;
9750 	tran->tran_bus_reset		= NULL;
9751 	tran->tran_bus_config		= fcp_scsi_bus_config;
9752 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9753 	tran->tran_bus_power		= NULL;
9754 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9755 
9756 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9757 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9758 	tran->tran_setup_pkt		= fcp_pkt_setup;
9759 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9760 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9761 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9762 
9763 	/*
9764 	 * Allocate an ndi event handle
9765 	 */
9766 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9767 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9768 
9769 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9770 	    sizeof (fcp_ndi_event_defs));
9771 
9772 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9773 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9774 
9775 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9776 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9777 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9778 
9779 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9780 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9781 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9782 		goto fail;
9783 	}
9784 	event_bind++;	/* Checked in fail case */
9785 
9786 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9787 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9788 	    != DDI_SUCCESS) {
9789 		fcp_log(CE_WARN, pptr->port_dip,
9790 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9791 		goto fail;
9792 	}
9793 	hba_attached++;	/* Checked in fail case */
9794 
9795 	pptr->port_mpxio = 0;
9796 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9797 	    MDI_SUCCESS) {
9798 		pptr->port_mpxio++;
9799 	}
9800 
9801 	/*
9802 	 * The following code puts the new port structure in the global
9803 	 * list of ports and, if it is the first port to attach, it starts
9804 	 * the fcp_watchdog_tick.
9805 	 *
9806 	 * Why put this new port in the global list before we are done
9807 	 * attaching it?  We are indeed making the structure globally known
9808 	 * before the attach is complete, and the reason is the code that
9809 	 * follows.  At this point the resources to handle the port are
9810 	 * allocated.  This function is now going to do the following:
9811 	 *
9812 	 *   1) It is going to try to register with the name server,
9813 	 *	advertising the new FCP capability of the port.
9814 	 *   2) It is going to play the role of the fp/fctl layer by building
9815 	 *	a list of worldwide names reachable through this port and call
9816 	 *	itself on fcp_statec_callback().  That requires the port to
9817 	 *	be part of the global list.
9818 	 */
9819 	mutex_enter(&fcp_global_mutex);
9820 	if (fcp_port_head == NULL) {
9821 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9822 	}
9823 	pptr->port_next = fcp_port_head;
9824 	fcp_port_head = pptr;
9825 	soft_state_linked++;
9826 
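	/*
	 * Only the first attaching port starts the watchdog.  The timeout
	 * is expressed in seconds and converted to clock ticks here.
	 */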
9827 	if (fcp_watchdog_init++ == 0) {
9828 		fcp_watchdog_tick = fcp_watchdog_timeout *
9829 		    drv_usectohz(1000000);
9830 		fcp_watchdog_id = timeout(fcp_watch, NULL,
9831 		    fcp_watchdog_tick);
9832 	}
9833 	mutex_exit(&fcp_global_mutex);
9834 
9835 	/*
9836 	 * Here an attempt is made to register the new FCP capability with
9837 	 * the name server.  That is done by sending an RFT_ID request to it.
9838 	 * It is done synchronously.  The function fcp_do_ns_registry()
9839 	 * doesn't return until the name server has responded.
9840 	 * On failure, just ignore it for now; it will get retried during
9841 	 * state change callbacks.  We set a flag to record the failure.
9842 	 */
9843 	if (fcp_do_ns_registry(pptr, s_id)) {
9844 		mutex_enter(&pptr->port_mutex);
9845 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9846 		mutex_exit(&pptr->port_mutex);
9847 	} else {
9848 		mutex_enter(&pptr->port_mutex);
9849 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9850 		mutex_exit(&pptr->port_mutex);
9851 	}
9852 
9853 	/*
9854 	 * Look up the boot WWN property.
9855 	 */
9856 	if (modrootloaded != 1) {
9857 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9858 		    ddi_get_parent(pinfo->port_dip),
9859 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9860 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9861 		    (nbytes == FC_WWN_SIZE)) {
9862 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9863 		}
9864 		if (boot_wwn) {
9865 			ddi_prop_free(boot_wwn);
9866 		}
9867 	}
9868 
9869 	/*
9870 	 * Handle various topologies and link states.
9871 	 */
9872 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
9873 	case FC_STATE_OFFLINE:
9874 
9875 		/*
9876 		 * We're attaching a port where the link is offline.
9877 		 *
9878 		 * Wait for ONLINE, at which time a state
9879 		 * change will cause a statec_callback.
9880 		 *
9881 		 * In the meantime, do not do anything.
9882 		 */
9883 		res = DDI_SUCCESS;
9884 		pptr->port_state |= FCP_STATE_OFFLINE;
9885 		break;
9886 
9887 	case FC_STATE_ONLINE: {
9888 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
9889 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
9890 			res = DDI_SUCCESS;
9891 			break;
9892 		}
9893 		/*
9894 		 * discover devices and create nodes (a private
9895 		 * loop or point-to-point)
9896 		 */
9897 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
9898 
9899 		/*
9900 		 * At this point we are going to build a list of all the ports
9901 		 * that	can be reached through this local port.	 It looks like
9902 		 * we cannot handle more than FCP_MAX_DEVICES per local port
9903 		 * (128).
9904 		 */
9905 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
9906 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
9907 		    KM_NOSLEEP)) == NULL) {
9908 			fcp_log(CE_WARN, pptr->port_dip,
9909 			    "!fcp%d: failed to allocate portmap",
9910 			    instance);
9911 			goto fail;
9912 		}
9913 
9914 		/*
9915 		 * fc_ulp_getportmap() is going to provide us with the list of
9916 		 * remote ports in the buffer we just allocated.  The way the
9917 		 * list is going to be retrieved depends on the topology.
9918 		 * However, if we are connected to a Fabric, a name server
9919 		 * request may be sent to get the list of FCP capable ports.
9920 		 * It should be noted that in that case the request is
9921 		 * synchronous.  This means we are stuck here until the name
9922 		 * server replies.  A lot of things can change during that
9923 		 * time, including, possibly, being called on
9924 		 * fcp_statec_callback() for different reasons.  I'm not sure
9925 		 * the code can handle that.
9926 		 */
9927 		max_cnt = FCP_MAX_DEVICES;
9928 		alloc_cnt = FCP_MAX_DEVICES;
9929 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
9930 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
9931 		    FC_SUCCESS) {
9932 			caddr_t msg;
9933 
9934 			(void) fc_ulp_error(res, &msg);
9935 
9936 			/*
9937 			 * This just means the transport is busy,
9938 			 * perhaps building a portmap; so, for now,
9939 			 * succeed this port attach.  When the
9940 			 * transport has a new map, it'll send us
9941 			 * a state change then.
9942 			 */
9943 			fcp_log(CE_WARN, pptr->port_dip,
9944 			    "!failed to get port map : %s", msg);
9945 
9946 			res = DDI_SUCCESS;
9947 			break;	/* go return result */
9948 		}
9949 		if (max_cnt > alloc_cnt) {
9950 			alloc_cnt = max_cnt;
9951 		}
9952 
9953 		/*
9954 		 * We are now going to call fcp_statec_callback() ourselves.
9955 		 * By issuing this call we are trying to kick off the
9956 		 * enumeration process: let the state change callback do the
9957 		 * SCSI device discovery and create the devinfos.
9958 		 */
9962 		fcp_statec_callback(ulph, pptr->port_fp_handle,
9963 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
9964 		    max_cnt, pptr->port_id);
9965 
9966 		res = DDI_SUCCESS;
9967 		break;
9968 	}
9969 
9970 	default:
9971 		/* unknown port state */
9972 		fcp_log(CE_WARN, pptr->port_dip,
9973 		    "!fcp%d: invalid port state at attach=0x%x",
9974 		    instance, pptr->port_phys_state);
9975 
9976 		mutex_enter(&pptr->port_mutex);
9977 		pptr->port_phys_state = FCP_STATE_OFFLINE;
9978 		mutex_exit(&pptr->port_mutex);
9979 
9980 		res = DDI_SUCCESS;
9981 		break;
9982 	}
9983 
9984 	/* free temp list if used */
9985 	if (tmp_list != NULL) {
9986 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
9987 	}
9988 
9989 	/* note the attach time */
9990 	pptr->port_attach_time = lbolt64;
9991 
9992 	/* all done */
9993 	return (res);
9994 
9995 	/* a failure we have to clean up after */
9996 fail:
9997 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
9998 
9999 	if (soft_state_linked) {
10000 		/* remove this fcp_port from the linked list */
10001 		(void) fcp_soft_state_unlink(pptr);
10002 	}
10003 
10004 	/* unbind and free event set */
10005 	if (pptr->port_ndi_event_hdl) {
10006 		if (event_bind) {
10007 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10008 			    &pptr->port_ndi_events, NDI_SLEEP);
10009 		}
10010 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10011 	}
10012 
10013 	if (pptr->port_ndi_event_defs) {
10014 		(void) kmem_free(pptr->port_ndi_event_defs,
10015 		    sizeof (fcp_ndi_event_defs));
10016 	}
10017 
10018 	/*
10019 	 * Clean up mpxio stuff
10020 	 */
10021 	if (pptr->port_mpxio) {
10022 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10023 		pptr->port_mpxio--;
10024 	}
10025 
10026 	/* undo SCSI HBA setup */
10027 	if (hba_attached) {
10028 		(void) scsi_hba_detach(pptr->port_dip);
10029 	}
10030 	if (pptr->port_tran != NULL) {
10031 		scsi_hba_tran_free(pptr->port_tran);
10032 	}
10033 
10034 	mutex_enter(&fcp_global_mutex);
10035 
10036 	/*
10037 	 * We check soft_state_linked because it is incremented right before
10038 	 * we increment fcp_watchdog_init.  Therefore, if soft_state_linked
10039 	 * is still FALSE, we know we must not decrement fcp_watchdog_init
10040 	 * or possibly call untimeout.
10041 	 */
10042 
10043 	if (soft_state_linked) {
10044 		if (--fcp_watchdog_init == 0) {
10045 			timeout_id_t	tid = fcp_watchdog_id;
10046 
10047 			mutex_exit(&fcp_global_mutex);
10048 			(void) untimeout(tid);
10049 		} else {
10050 			mutex_exit(&fcp_global_mutex);
10051 		}
10052 	} else {
10053 		mutex_exit(&fcp_global_mutex);
10054 	}
10055 
10056 	if (mutex_initted) {
10057 		mutex_destroy(&pptr->port_mutex);
10058 		mutex_destroy(&pptr->port_pkt_mutex);
10059 	}
10060 
10061 	if (tmp_list != NULL) {
10062 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10063 	}
10064 
10065 	/* this makes pptr invalid */
10066 	ddi_soft_state_free(fcp_softstate, instance);
10067 
10068 	return (DDI_FAILURE);
10069 }
10070 
10071 
10072 static int
10073 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10074 {
10075 	int count = 0;
10076 
10077 	mutex_enter(&pptr->port_mutex);
10078 
10079 	/*
10080 	 * if the port is powered down or suspended, nothing else
10081 	 * to do; just return.
10082 	 */
10083 	if (flag != FCP_STATE_DETACHING) {
10084 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10085 		    FCP_STATE_SUSPENDED)) {
10086 			pptr->port_state |= flag;
10087 			mutex_exit(&pptr->port_mutex);
10088 			return (FC_SUCCESS);
10089 		}
10090 	}
10091 
10092 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10093 		mutex_exit(&pptr->port_mutex);
10094 		return (FC_FAILURE);
10095 	}
10096 
10097 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10098 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10099 	    "fcp_handle_port_detach: port is detaching");
10100 
10101 	pptr->port_state |= flag;
10102 
10103 	/*
10104 	 * Wait for any ongoing reconfig/ipkt to complete, which
10105 	 * ensures that freeing the targets/luns is safe.
10106 	 * No more references to this port should come from statec/ioctl
10107 	 * after that, as it was removed from the global port list.
10108 	 */
10109 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10110 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10111 		/*
10112 		 * Let's give sufficient time for reconfig/ipkt
10113 		 * to complete.
10114 		 */
10115 		if (count++ >= FCP_ICMD_DEADLINE) {
10116 			break;
10117 		}
10118 		mutex_exit(&pptr->port_mutex);
10119 		delay(drv_usectohz(1000000));
10120 		mutex_enter(&pptr->port_mutex);
10121 	}
10122 
10123 	/*
10124 	 * if the driver is still busy then fail to
10125 	 * suspend/power down.
10126 	 */
10127 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10128 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10129 		pptr->port_state &= ~flag;
10130 		mutex_exit(&pptr->port_mutex);
10131 		return (FC_FAILURE);
10132 	}
10133 
10134 	if (flag == FCP_STATE_DETACHING) {
10135 		pptr = fcp_soft_state_unlink(pptr);
10136 		ASSERT(pptr != NULL);
10137 	}
10138 
10139 	pptr->port_link_cnt++;
10140 	pptr->port_state |= FCP_STATE_OFFLINE;
10141 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10142 
10143 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10144 	    FCP_CAUSE_LINK_DOWN);
10145 	mutex_exit(&pptr->port_mutex);
10146 
10147 	/* kill watch dog timer if we're the last */
10148 	mutex_enter(&fcp_global_mutex);
10149 	if (--fcp_watchdog_init == 0) {
10150 		timeout_id_t	tid = fcp_watchdog_id;
10151 		mutex_exit(&fcp_global_mutex);
10152 		(void) untimeout(tid);
10153 	} else {
10154 		mutex_exit(&fcp_global_mutex);
10155 	}
10156 
10157 	/* clean up the port structures */
10158 	if (flag == FCP_STATE_DETACHING) {
10159 		fcp_cleanup_port(pptr, instance);
10160 	}
10161 
10162 	return (FC_SUCCESS);
10163 }
10164 
10165 
10166 static void
10167 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10168 {
10169 	ASSERT(pptr != NULL);
10170 
10171 	/* unbind and free event set */
10172 	if (pptr->port_ndi_event_hdl) {
10173 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10174 		    &pptr->port_ndi_events, NDI_SLEEP);
10175 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10176 	}
10177 
10178 	if (pptr->port_ndi_event_defs) {
10179 		(void) kmem_free(pptr->port_ndi_event_defs,
10180 		    sizeof (fcp_ndi_event_defs));
10181 	}
10182 
10183 	/* free the lun/target structures and devinfos */
10184 	fcp_free_targets(pptr);
10185 
10186 	/*
10187 	 * Clean up mpxio stuff
10188 	 */
10189 	if (pptr->port_mpxio) {
10190 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10191 		pptr->port_mpxio--;
10192 	}
10193 
10194 	/* clean up SCSA stuff */
10195 	(void) scsi_hba_detach(pptr->port_dip);
10196 	if (pptr->port_tran != NULL) {
10197 		scsi_hba_tran_free(pptr->port_tran);
10198 	}
10199 
10200 #ifdef	KSTATS_CODE
10201 	/* clean up kstats */
10202 	if (pptr->fcp_ksp != NULL) {
10203 		kstat_delete(pptr->fcp_ksp);
10204 	}
10205 #endif
10206 
10207 	/* clean up soft state mutexes/condition variables */
10208 	mutex_destroy(&pptr->port_mutex);
10209 	mutex_destroy(&pptr->port_pkt_mutex);
10210 
10211 	/* all done with soft state */
10212 	ddi_soft_state_free(fcp_softstate, instance);
10213 }
10214 
10215 /*
10216  *     Function: fcp_kmem_cache_constructor
10217  *
10218  *  Description: This function allocates and initializes the resources required
10219  *		 to build a scsi_pkt structure for the target driver.  The result
10220  *		 of the allocation and initialization will be cached in the
10221  *		 memory cache.	As DMA resources may be allocated here, that
10222  *		 means DMA resources will be tied up in the cache manager.
10223  *		 This is a tradeoff that has been made for performance reasons.
10224  *
10225  *     Argument: *buf		Memory to preinitialize.
10226  *		 *arg		FCP port structure (fcp_port).
10227  *		 kmflags	Value passed to kmem_cache_alloc() and
10228  *				propagated to the constructor.
10229  *
10230  * Return Value: 0	Allocation/Initialization was successful.
10231  *		 -1	Allocation or Initialization failed.
10232  *
10233  *
10234  * If the returned value is 0, the buffer is initialized like this:
10235  *
10236  *		    +================================+
10237  *	     +----> |	      struct scsi_pkt	     |
10238  *	     |	    |				     |
10239  *	     | +--- | pkt_ha_private		     |
10240  *	     | |    |				     |
10241  *	     | |    +================================+
10242  *	     | |
10243  *	     | |    +================================+
10244  *	     | +--> |	    struct fcp_pkt	     | <---------+
10245  *	     |	    |				     |		 |
10246  *	     +----- | cmd_pkt			     |		 |
10247  *		    |			  cmd_fp_pkt | ---+	 |
10248  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10249  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10250  *	  |    |    |--------------------------------|	  |	 |
10251  *	  |    |    |	      struct fc_packet	     | <--+	 |
10252  *	  |    |    |				     |		 |
10253  *	  |    |    |		     pkt_ulp_private | ----------+
10254  *	  |    |    |		     pkt_fca_private | -----+
10255  *	  |    |    |		     pkt_data_cookie | ---+ |
10256  *	  |    |    | pkt_cmdlen		     |	  | |
10257  *	  |    |(a) | pkt_rsplen		     |	  | |
10258  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10259  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10260  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10261  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10262  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10263  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10264  *		    +================================+	  | |  |   |   |    |
10265  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10266  *		    |				     |	    |  |   |   |    |
10267  *		    +================================+	    |  |   |   |    |
10268  *		    |	      fca_private	     | <----+  |   |   |    |
10269  *		    |				     |	       |   |   |    |
10270  *		    +================================+	       |   |   |    |
10271  *							       |   |   |    |
10272  *							       |   |   |    |
10273  *		    +================================+	 (d)   |   |   |    |
10274  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10275  *		    |				     |		   |   |    |
10276  *		    +================================+		   |   |    |
10277  *								   |   |    |
10278  *		    +================================+	 (d)	   |   |    |
10279  *		    |		fcp_resp	     | <-----------+   |    |
10280  *		    |	(DMA resources associated)   |		       |    |
10281  *		    +================================+		       |    |
10282  *								       |    |
10283  *								       |    |
10284  *								       |    |
10285  *		    +================================+	 (c)	       |    |
10286  *		    |	     fcp_cmd cookies	     | <---------------+    |
10287  *		    |				     |			    |
10288  *		    +================================+			    |
10289  *									    |
10290  *		    +================================+	 (c)		    |
10291  *		    |		 fcp_cmd	     | <--------------------+
10292  *		    |	(DMA resources associated)   |
10293  *		    +================================+
10294  *
10295  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10296  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10297  * (c) Only if DMA is used for the FCP_CMD buffer.
10298  * (d) Only if DMA is used for the FCP_RESP buffer
10299  */
10300 static int
10301 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10302     int kmflags)
10303 {
10304 	struct fcp_pkt	*cmd;
10305 	struct fcp_port	*pptr;
10306 	fc_packet_t	*fpkt;
10307 
10308 	pptr = (struct fcp_port *)tran->tran_hba_private;
10309 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10310 	bzero(cmd, tran->tran_hba_len);
10311 
10312 	cmd->cmd_pkt = pkt;
10313 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10314 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10315 	cmd->cmd_fp_pkt = fpkt;
10316 
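	/*
	 * The single buffer sized by tran_hba_len is carved up here: the
	 * fcp_pkt itself, followed by the array of data DMA cookies,
	 * followed by the FCA private area.
	 */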
10317 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10318 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10319 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10320 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10321 
10322 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10323 	    sizeof (struct fcp_pkt));
10324 
10325 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10326 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10327 
10328 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10329 		/*
10330 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10331 		 * fcp_resp.  The transfer of information will be done by
10332 		 * bcopy.
10333 		 * The naming of the flag (which is actually a value) is
10334 		 * unfortunate.  FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10335 		 * DMA" but rather "NO DMA".
10336 		 */
10337 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10338 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10339 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10340 	} else {
10341 		/*
10342 		 * The underlying HBA will DMA the fcp_cmd buffer and fcp_resp
10343 		 * buffer.  A buffer is allocated for each one using the
10344 		 * ddi_dma_* interfaces.
10345 		 */
10346 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10347 			return (-1);
10348 		}
10349 	}
10350 
10351 	return (0);
10352 }
10353 
10354 /*
10355  *     Function: fcp_kmem_cache_destructor
10356  *
10357  *  Description: Called by the destructor of the cache managed by SCSA.
10358  *		 All the resources pre-allocated in fcp_kmem_cache_constructor
10359  *		 and the data pre-initialized there are freed and
10360  *		 uninitialized here.
10361  *
10362  *     Argument: *buf		Memory to uninitialize.
10363  *		 *arg		FCP port structure (fcp_port).
10364  *
10365  * Return Value: None
10366  *
10367  *	Context: kernel
10368  */
10369 static void
10370 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10371 {
10372 	struct fcp_pkt	*cmd;
10373 	struct fcp_port	*pptr;
10374 
10375 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10376 	cmd = pkt->pkt_ha_private;
10377 
10378 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10379 		/*
10380 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10381 		 * buffer and DMA resources allocated to do so are released.
10382 		 */
10383 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10384 	}
10385 }
10386 
10387 /*
10388  *     Function: fcp_alloc_cmd_resp
10389  *
10390  *  Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10391  *		 will be DMAed by the HBA.  The buffer is allocated applying
10392  *		 the DMA requirements for the HBA.  The buffers allocated will
10393  *		 also be bound.	 DMA resources are allocated in the process.
10394  *		 They will be released by fcp_free_cmd_resp().
10395  *
10396  *     Argument: *pptr	FCP port.
10397  *		 *fpkt	fc packet for which the cmd and resp packet should be
10398  *			allocated.
10399  *		 flags	Allocation flags.
10400  *
10401  * Return Value: FC_FAILURE
10402  *		 FC_SUCCESS
10403  *
10404  *	Context: User or Kernel context only if flags == KM_SLEEP.
10405  *		 Interrupt context if the KM_SLEEP is not specified.
10406  */
10407 static int
10408 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10409 {
10410 	int			rval;
10411 	int			cmd_len;
10412 	int			resp_len;
10413 	ulong_t			real_len;
10414 	int			(*cb) (caddr_t);
10415 	ddi_dma_cookie_t	pkt_cookie;
10416 	ddi_dma_cookie_t	*cp;
10417 	uint32_t		cnt;
10418 
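	/*
	 * Pick the DDI DMA callback from the allocation flags: wait for
	 * resources only when the caller can tolerate sleeping (KM_SLEEP);
	 * otherwise fail immediately rather than block.
	 */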
10419 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10420 
10421 	cmd_len = fpkt->pkt_cmdlen;
10422 	resp_len = fpkt->pkt_rsplen;
10423 
10424 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10425 
10426 	/* Allocation of a DMA handle used in subsequent calls. */
10427 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10428 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10429 		return (FC_FAILURE);
10430 	}
10431 
10432 	/* A buffer is allocated that satisfies the DMA requirements. */
10433 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10434 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10435 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10436 
10437 	if (rval != DDI_SUCCESS) {
10438 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10439 		return (FC_FAILURE);
10440 	}
10441 
10442 	if (real_len < cmd_len) {
10443 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10444 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10445 		return (FC_FAILURE);
10446 	}
10447 
10448 	/* The buffer allocated is DMA bound. */
10449 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10450 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10451 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10452 
10453 	if (rval != DDI_DMA_MAPPED) {
10454 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10455 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10456 		return (FC_FAILURE);
10457 	}
10458 
10459 	if (fpkt->pkt_cmd_cookie_cnt >
10460 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10461 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10462 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10463 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10464 		return (FC_FAILURE);
10465 	}
10466 
10467 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10468 
10469 	/*
10470 	 * The buffer where the scatter/gather list is going to be built is
10471 	 * allocated.
10472 	 */
10473 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10474 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10475 	    KM_NOSLEEP);
10476 
10477 	if (cp == NULL) {
10478 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10479 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10480 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10481 		return (FC_FAILURE);
10482 	}
10483 
10484 	/*
10485 	 * The scatter/gather list for the buffer we just allocated is built
10486 	 * here.
10487 	 */
10488 	*cp = pkt_cookie;
10489 	cp++;
10490 
10491 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10492 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10493 		    &pkt_cookie);
10494 		*cp = pkt_cookie;
10495 	}
10496 
10497 	ASSERT(fpkt->pkt_resp_dma == NULL);
10498 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10499 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10500 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10501 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10502 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10503 		return (FC_FAILURE);
10504 	}
10505 
10506 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10507 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10508 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10509 	    &fpkt->pkt_resp_acc);
10510 
10511 	if (rval != DDI_SUCCESS) {
10512 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10513 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10514 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10515 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10516 		kmem_free(fpkt->pkt_cmd_cookie,
10517 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10518 		return (FC_FAILURE);
10519 	}
10520 
10521 	if (real_len < resp_len) {
10522 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10523 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10524 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10525 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10526 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10527 		kmem_free(fpkt->pkt_cmd_cookie,
10528 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10529 		return (FC_FAILURE);
10530 	}
10531 
10532 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10533 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10534 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10535 
10536 	if (rval != DDI_DMA_MAPPED) {
10537 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10538 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10539 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10540 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10541 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10542 		kmem_free(fpkt->pkt_cmd_cookie,
10543 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10544 		return (FC_FAILURE);
10545 	}
10546 
10547 	if (fpkt->pkt_resp_cookie_cnt >
10548 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10549 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10550 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10551 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10552 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10553 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10554 		kmem_free(fpkt->pkt_cmd_cookie,
10555 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10556 		return (FC_FAILURE);
10557 	}
10558 
10559 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10560 
10561 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10562 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10563 	    KM_NOSLEEP);
10564 
10565 	if (cp == NULL) {
10566 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10567 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10568 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10569 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10570 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10571 		kmem_free(fpkt->pkt_cmd_cookie,
10572 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10573 		return (FC_FAILURE);
10574 	}
10575 
10576 	*cp = pkt_cookie;
10577 	cp++;
10578 
10579 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10580 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10581 		    &pkt_cookie);
10582 		*cp = pkt_cookie;
10583 	}
10584 
10585 	return (FC_SUCCESS);
10586 }
10587 
10588 /*
10589  *     Function: fcp_free_cmd_resp
10590  *
10591  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10592  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10593  *		 associated with them.	That includes the DMA resources and the
10594  *		 buffer allocated for the cookies of each one of them.
10595  *
10596  *     Argument: *pptr		FCP port context.
10597  *		 *fpkt		fc packet containing the cmd and resp packet
10598  *				to be released.
10599  *
10600  * Return Value: None
10601  *
10602  *	Context: Interrupt, User and Kernel context.
10603  */
10604 /* ARGSUSED */
10605 static void
10606 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10607 {
10608 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10609 
10610 	if (fpkt->pkt_resp_dma) {
10611 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10612 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10613 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10614 	}
10615 
10616 	if (fpkt->pkt_resp_cookie) {
10617 		kmem_free(fpkt->pkt_resp_cookie,
10618 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10619 		fpkt->pkt_resp_cookie = NULL;
10620 	}
10621 
10622 	if (fpkt->pkt_cmd_dma) {
10623 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10624 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10625 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10626 	}
10627 
10628 	if (fpkt->pkt_cmd_cookie) {
10629 		kmem_free(fpkt->pkt_cmd_cookie,
10630 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10631 		fpkt->pkt_cmd_cookie = NULL;
10632 	}
10633 }
10634 
10635 
10636 /*
10637  * called by the transport to do our own target initialization
10638  *
10639  * can acquire and release the global mutex
10640  */
10641 /* ARGSUSED */
10642 static int
10643 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10644     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10645 {
10646 	uchar_t			*bytes;
10647 	uint_t			nbytes;
10648 	uint16_t		lun_num;
10649 	struct fcp_tgt	*ptgt;
10650 	struct fcp_lun	*plun;
10651 	struct fcp_port	*pptr = (struct fcp_port *)
10652 	    hba_tran->tran_hba_private;
10653 
10654 	ASSERT(pptr != NULL);
10655 
10656 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10657 	    FCP_BUF_LEVEL_8, 0,
10658 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10659 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10660 
10661 	/* get our port WWN property */
10662 	bytes = NULL;
10663 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10664 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10665 	    (nbytes != FC_WWN_SIZE)) {
10666 		/* no port WWN property */
10667 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10668 		    FCP_BUF_LEVEL_8, 0,
10669 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10670 		    " for %s (instance %d): bytes=%p nbytes=%x",
10671 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10672 		    nbytes);
10673 
10674 		if (bytes != NULL) {
10675 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10676 		}
10677 
10678 		return (DDI_NOT_WELL_FORMED);
10679 	}
10680 	ASSERT(bytes != NULL);
10681 
10682 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10683 	    LUN_PROP, 0xFFFF);
10684 	if (lun_num == 0xFFFF) {
10685 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10686 		    FCP_BUF_LEVEL_8, 0,
10687 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10688 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10689 		    ddi_get_instance(tgt_dip));
10690 
10691 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10692 		return (DDI_NOT_WELL_FORMED);
10693 	}
10694 
10695 	mutex_enter(&pptr->port_mutex);
10696 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10697 		mutex_exit(&pptr->port_mutex);
10698 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10699 		    FCP_BUF_LEVEL_8, 0,
10700 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10701 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10702 		    ddi_get_instance(tgt_dip));
10703 
10704 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10705 		return (DDI_FAILURE);
10706 	}
10707 
10708 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10709 	    FC_WWN_SIZE) == 0);
10710 	ASSERT(plun->lun_num == lun_num);
10711 
10712 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10713 
10714 	ptgt = plun->lun_tgt;
10715 
10716 	mutex_enter(&ptgt->tgt_mutex);
10717 	plun->lun_tgt_count++;
10718 	scsi_device_hba_private_set(sd, plun);
10719 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10720 	plun->lun_sd = sd;
10721 	mutex_exit(&ptgt->tgt_mutex);
10722 	mutex_exit(&pptr->port_mutex);
10723 
10724 	return (DDI_SUCCESS);
10725 }
10726 
10727 /*ARGSUSED*/
10728 static int
10729 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10730     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10731 {
10732 	uchar_t			*bytes;
10733 	uint_t			nbytes;
10734 	uint16_t		lun_num;
10735 	struct fcp_tgt	*ptgt;
10736 	struct fcp_lun	*plun;
10737 	struct fcp_port	*pptr = (struct fcp_port *)
10738 	    hba_tran->tran_hba_private;
10739 	child_info_t		*cip;
10740 
10741 	ASSERT(pptr != NULL);
10742 
10743 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10744 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10745 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10746 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10747 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10748 
10749 	cip = (child_info_t *)sd->sd_pathinfo;
10750 	if (cip == NULL) {
10751 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10752 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10753 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10754 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10755 		    ddi_get_instance(tgt_dip));
10756 
10757 		return (DDI_NOT_WELL_FORMED);
10758 	}
10759 
10760 	/* get our port WWN property */
10761 	bytes = NULL;
10762 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10763 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10764 	    (nbytes != FC_WWN_SIZE)) {
10765 		if (bytes)
10766 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10767 		return (DDI_NOT_WELL_FORMED);
10768 	}
10769 
10770 	ASSERT(bytes != NULL);
10771 
10772 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10773 	    LUN_PROP, 0xFFFF);
10774 	if (lun_num == 0xFFFF) {
10775 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10776 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10777 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10778 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10779 		    ddi_get_instance(tgt_dip));
10780 
10781 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10782 		return (DDI_NOT_WELL_FORMED);
10783 	}
10784 
10785 	mutex_enter(&pptr->port_mutex);
10786 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10787 		mutex_exit(&pptr->port_mutex);
10788 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10789 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10790 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10791 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10792 		    ddi_get_instance(tgt_dip));
10793 
10794 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10795 		return (DDI_FAILURE);
10796 	}
10797 
10798 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10799 	    FC_WWN_SIZE) == 0);
10800 	ASSERT(plun->lun_num == lun_num);
10801 
10802 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10803 
10804 	ptgt = plun->lun_tgt;
10805 
10806 	mutex_enter(&ptgt->tgt_mutex);
10807 	plun->lun_tgt_count++;
10808 	scsi_device_hba_private_set(sd, plun);
10809 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10810 	plun->lun_sd = sd;
10811 	mutex_exit(&ptgt->tgt_mutex);
10812 	mutex_exit(&pptr->port_mutex);
10813 
10814 	return (DDI_SUCCESS);
10815 }
10816 
10817 
10818 /*
10819  * called by the transport to do our own target initialization
10820  *
10821  * can acquire and release the global mutex
10822  */
10823 /* ARGSUSED */
10824 static int
10825 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10826     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10827 {
10828 	struct fcp_port	*pptr = (struct fcp_port *)
10829 	    hba_tran->tran_hba_private;
10830 	int			rval;
10831 
10832 	ASSERT(pptr != NULL);
10833 
10834 	/*
10835 	 * Child node is getting initialized.  Look at the mpxio component
10836 	 * type on the child device to see if this device is mpxio managed
10837 	 * or not.
10838 	 */
10839 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10840 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10841 	} else {
10842 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10843 	}
10844 
10845 	return (rval);
10846 }
10847 
10848 
10849 /* ARGSUSED */
10850 static void
10851 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10852     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10853 {
10854 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
10855 	struct fcp_tgt	*ptgt;
10856 
10857 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10858 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10859 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10860 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10861 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10862 
10863 	if (plun == NULL) {
10864 		return;
10865 	}
10866 	ptgt = plun->lun_tgt;
10867 
10868 	ASSERT(ptgt != NULL);
10869 
10870 	mutex_enter(&ptgt->tgt_mutex);
10871 	ASSERT(plun->lun_tgt_count > 0);
10872 
10873 	if (--plun->lun_tgt_count == 0) {
10874 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
10875 	}
10876 	plun->lun_sd = NULL;
10877 	mutex_exit(&ptgt->tgt_mutex);
10878 }
10879 
10880 /*
10881  *     Function: fcp_scsi_start
10882  *
10883  *  Description: This function is called by the target driver to request a
10884  *		 command to be sent.
10885  *
10886  *     Argument: *ap		SCSI address of the device.
10887  *		 *pkt		SCSI packet containing the cmd to send.
10888  *
10889  * Return Value: TRAN_ACCEPT
10890  *		 TRAN_BUSY
10891  *		 TRAN_BADPKT
10892  *		 TRAN_FATAL_ERROR
10893  */
10894 static int
10895 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
10896 {
10897 	struct fcp_port	*pptr = ADDR2FCP(ap);
10898 	struct fcp_lun	*plun = ADDR2LUN(ap);
10899 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
10900 	struct fcp_tgt	*ptgt = plun->lun_tgt;
10901 	int			rval;
10902 
10903 	/* ensure command isn't already issued */
10904 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
10905 
10906 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10907 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
10908 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
10909 
10910 	/*
10911 	 * It is strange that we enter the fcp_port mutex and the target
10912 	 * mutex to check the lun state (which has a mutex of its own).
10913 	 */
10914 	mutex_enter(&pptr->port_mutex);
10915 	mutex_enter(&ptgt->tgt_mutex);
10916 
10917 	/*
10918 	 * If the device is offline and is not in the process of coming
10919 	 * online, fail the request.
10920 	 */
10921 
10922 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
10923 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
10924 		mutex_exit(&ptgt->tgt_mutex);
10925 		mutex_exit(&pptr->port_mutex);
10926 
10927 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
10928 			pkt->pkt_reason = CMD_DEV_GONE;
10929 		}
10930 
10931 		return (TRAN_FATAL_ERROR);
10932 	}
10933 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
10934 
10935 	/*
	 * If we are suspended, the kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
10938 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
10939 	 * assume we have been suspended.  There is hardware such as
10940 	 * the v880 that doesn't do PM.	 Thus, the check for
10941 	 * ddi_in_panic.
10942 	 *
10943 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
10944 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
10945 	 * either the device will have gone away or changed and we can fail
10946 	 * the request, or we can proceed if the device didn't change.
10947 	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, so we allow the request to be
	 * put on the internal queue here in case the device comes back
	 * within the offline timeout.  fctl will fix up the pd's if the
	 * tgt_pd_handle has gone NULL, while fcp deals with cases where
	 * pkt_pd is NULL.  pkt_pd could be NULL because the device was
	 * disappearing during or since packet initialization.
10955 	 */
10956 
10957 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
10958 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
10959 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
10960 	    (ptgt->tgt_pd_handle == NULL) ||
10961 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
10962 		/*
		 * If ((the LUN is busy AND
		 *	the port is not suspended AND
		 *	the system is not in panic state) OR
		 *	(the port is coming up)),
		 *
		 * we check to see if any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set.  If one of them is set the value
		 * returned will be TRAN_BUSY.  If not, the request is queued.
10971 		 */
10972 		mutex_exit(&ptgt->tgt_mutex);
10973 		mutex_exit(&pptr->port_mutex);
10974 
		/* see if using interrupts is allowed (so queueing will work) */
10976 		if (pkt->pkt_flags & FLAG_NOINTR) {
10977 			pkt->pkt_resid = 0;
10978 			return (TRAN_BUSY);
10979 		}
10980 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
10981 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10982 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
10983 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
10984 			return (TRAN_BUSY);
10985 		}
10986 #ifdef	DEBUG
10987 		mutex_enter(&pptr->port_pkt_mutex);
10988 		pptr->port_npkts++;
10989 		mutex_exit(&pptr->port_pkt_mutex);
10990 #endif /* DEBUG */
10991 
		/* go queue up the pkt for later */
10993 		fcp_queue_pkt(pptr, cmd);
10994 		return (TRAN_ACCEPT);
10995 	}
10996 	cmd->cmd_state = FCP_PKT_ISSUED;
10997 
10998 	mutex_exit(&ptgt->tgt_mutex);
10999 	mutex_exit(&pptr->port_mutex);
11000 
11001 	/*
11002 	 * Now that we released the mutexes, what was protected by them can
11003 	 * change.
11004 	 */
11005 
11006 	/*
11007 	 * If there is a reconfiguration in progress, wait for it to complete.
11008 	 */
11009 	fcp_reconfig_wait(pptr);
11010 
11011 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11012 	    pkt->pkt_time : 0;
11013 
11014 	/* prepare the packet */
11015 
11016 	fcp_prepare_pkt(pptr, cmd, plun);
11017 
11018 	if (cmd->cmd_pkt->pkt_time) {
11019 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11020 	} else {
11021 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11022 	}
11023 
11024 	/*
11025 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11026 	 * have to do polled I/O
11027 	 */
11028 	if (pkt->pkt_flags & FLAG_NOINTR) {
11029 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11030 		return (fcp_dopoll(pptr, cmd));
11031 	}
11032 
11033 #ifdef	DEBUG
11034 	mutex_enter(&pptr->port_pkt_mutex);
11035 	pptr->port_npkts++;
11036 	mutex_exit(&pptr->port_pkt_mutex);
11037 #endif /* DEBUG */
11038 
11039 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11040 	if (rval == FC_SUCCESS) {
11041 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11042 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11043 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11044 		return (TRAN_ACCEPT);
11045 	}
11046 
11047 	cmd->cmd_state = FCP_PKT_IDLE;
11048 
11049 #ifdef	DEBUG
11050 	mutex_enter(&pptr->port_pkt_mutex);
11051 	pptr->port_npkts--;
11052 	mutex_exit(&pptr->port_pkt_mutex);
11053 #endif /* DEBUG */
11054 
11055 	/*
11056 	 * For lack of clearer definitions, choose
11057 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11058 	 */
11059 
11060 	if (rval == FC_TRAN_BUSY) {
11061 		pkt->pkt_resid = 0;
11062 		rval = TRAN_BUSY;
11063 	} else {
11064 		mutex_enter(&ptgt->tgt_mutex);
11065 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11066 			child_info_t	*cip;
11067 
11068 			mutex_enter(&plun->lun_mutex);
11069 			cip = plun->lun_cip;
11070 			mutex_exit(&plun->lun_mutex);
11071 
11072 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11073 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11074 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11075 			    plun->lun_tgt->tgt_d_id, rval, cip);
11076 
11077 			rval = TRAN_FATAL_ERROR;
11078 		} else {
11079 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11080 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11081 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11082 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11083 				    pkt);
11084 				rval = TRAN_BUSY;
11085 			} else {
11086 				rval = TRAN_ACCEPT;
11087 				fcp_queue_pkt(pptr, cmd);
11088 			}
11089 		}
11090 		mutex_exit(&ptgt->tgt_mutex);
11091 	}
11092 
11093 	return (rval);
11094 }
11095 
11096 /*
11097  * called by the transport to abort a packet
11098  */
11099 /*ARGSUSED*/
11100 static int
11101 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11102 {
11103 	int tgt_cnt;
11104 	struct fcp_port		*pptr = ADDR2FCP(ap);
11105 	struct fcp_lun	*plun = ADDR2LUN(ap);
11106 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11107 
11108 	if (pkt == NULL) {
11109 		if (ptgt) {
11110 			mutex_enter(&ptgt->tgt_mutex);
11111 			tgt_cnt = ptgt->tgt_change_cnt;
11112 			mutex_exit(&ptgt->tgt_mutex);
11113 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11114 			return (TRUE);
11115 		}
11116 	}
11117 	return (FALSE);
11118 }
11119 
11120 
11121 /*
11122  * Perform reset
11123  */
11124 int
11125 fcp_scsi_reset(struct scsi_address *ap, int level)
11126 {
11127 	int			rval = 0;
11128 	struct fcp_port		*pptr = ADDR2FCP(ap);
11129 	struct fcp_lun	*plun = ADDR2LUN(ap);
11130 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11131 
11132 	if (level == RESET_ALL) {
11133 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11134 			rval = 1;
11135 		}
11136 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11137 		/*
11138 		 * If we are in the middle of discovery, return
11139 		 * SUCCESS as this target will be rediscovered
11140 		 * anyway
11141 		 */
11142 		mutex_enter(&ptgt->tgt_mutex);
11143 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11144 			mutex_exit(&ptgt->tgt_mutex);
11145 			return (1);
11146 		}
11147 		mutex_exit(&ptgt->tgt_mutex);
11148 
11149 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11150 			rval = 1;
11151 		}
11152 	}
11153 	return (rval);
11154 }
11155 
11156 
11157 /*
11158  * called by the framework to get a SCSI capability
11159  */
11160 static int
11161 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11162 {
11163 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11164 }
11165 
11166 
11167 /*
11168  * called by the framework to set a SCSI capability
11169  */
11170 static int
11171 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11172 {
11173 	return (fcp_commoncap(ap, cap, value, whom, 1));
11174 }
11175 
11176 /*
11177  *     Function: fcp_pkt_setup
11178  *
11179  *  Description: This function sets up the scsi_pkt structure passed by the
11180  *		 caller. This function assumes fcp_pkt_constructor has been
11181  *		 called previously for the packet passed by the caller.	 If
11182  *		 successful this call will have the following results:
11183  *
11184  *		   - The resources needed that will be constant through out
11185  *		     the whole transaction are allocated.
11186  *		   - The fields that will be constant through out the whole
11187  *		     transaction are initialized.
11188  *		   - The scsi packet will be linked to the LUN structure
11189  *		     addressed by the transaction.
11190  *
11191  *     Argument:
11192  *		 *pkt		Pointer to a scsi_pkt structure.
 *		 callback	Callback function.  SLEEP_FUNC indicates the
 *				caller is willing to wait for the resources.
 *		 arg		Argument for the callback (unused here).
11195  *
11196  * Return Value: 0	Success
11197  *		 !0	Failure
11198  *
11199  *	Context: Kernel context or interrupt context
11200  */
11201 /* ARGSUSED */
11202 static int
11203 fcp_pkt_setup(struct scsi_pkt *pkt,
11204     int (*callback)(caddr_t arg),
11205     caddr_t arg)
11206 {
11207 	struct fcp_pkt	*cmd;
11208 	struct fcp_port	*pptr;
11209 	struct fcp_lun	*plun;
11210 	struct fcp_tgt	*ptgt;
11211 	int		kf;
11212 	fc_packet_t	*fpkt;
11213 	fc_frame_hdr_t	*hp;
11214 
11215 	pptr = ADDR2FCP(&pkt->pkt_address);
11216 	plun = ADDR2LUN(&pkt->pkt_address);
11217 	ptgt = plun->lun_tgt;
11218 
11219 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11220 	fpkt = cmd->cmd_fp_pkt;
11221 
11222 	/*
11223 	 * this request is for dma allocation only
11224 	 */
11225 	/*
11226 	 * First step of fcp_scsi_init_pkt: pkt allocation
11227 	 * We determine if the caller is willing to wait for the
11228 	 * resources.
11229 	 */
11230 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11231 
11232 	/*
11233 	 * Selective zeroing of the pkt.
11234 	 */
11235 	cmd->cmd_back = NULL;
11236 	cmd->cmd_next = NULL;
11237 
11238 	/*
11239 	 * Zero out fcp command
11240 	 */
11241 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11242 
11243 	cmd->cmd_state = FCP_PKT_IDLE;
11244 
11245 	fpkt = cmd->cmd_fp_pkt;
11246 	fpkt->pkt_data_acc = NULL;
11247 
11248 	mutex_enter(&ptgt->tgt_mutex);
11249 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11250 
11251 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11252 	    != FC_SUCCESS) {
11253 		mutex_exit(&ptgt->tgt_mutex);
11254 		return (-1);
11255 	}
11256 
11257 	mutex_exit(&ptgt->tgt_mutex);
11258 
	/* Fill in the Fibre Channel frame header */
11260 	hp = &fpkt->pkt_cmd_fhdr;
11261 	hp->r_ctl = R_CTL_COMMAND;
11262 	hp->rsvd = 0;
11263 	hp->type = FC_TYPE_SCSI_FCP;
11264 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11265 	hp->seq_id = 0;
11266 	hp->df_ctl  = 0;
11267 	hp->seq_cnt = 0;
11268 	hp->ox_id = 0xffff;
11269 	hp->rx_id = 0xffff;
11270 	hp->ro = 0;
11271 
11272 	/*
	 * A doubly linked list (cmd_forw, cmd_back) is built
	 * out of every allocated packet on a per-lun basis.
	 *
	 * The packets are maintained in the list so as to satisfy
	 * scsi_abort() requests.  At present (and this is unlikely to
	 * change in the future) no SCSI target driver performs a real
	 * scsi_abort() of an individual packet; since the drivers don't
	 * keep packets around after scsi_transport(), they can only pass
	 * a NULL packet to abort all outstanding packets.
11283 	 */
11284 	mutex_enter(&plun->lun_mutex);
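	/* insert the new command at the head of the per-LUN packet list */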
11285 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11286 		plun->lun_pkt_head->cmd_back = cmd;
11287 	} else {
11288 		plun->lun_pkt_tail = cmd;
11289 	}
11290 	plun->lun_pkt_head = cmd;
11291 	mutex_exit(&plun->lun_mutex);
11292 	return (0);
11293 }
11294 
11295 /*
11296  *     Function: fcp_pkt_teardown
11297  *
11298  *  Description: This function releases a scsi_pkt structure and all the
11299  *		 resources attached to it.
11300  *
11301  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11302  *
11303  * Return Value: None
11304  *
11305  *	Context: User, Kernel or Interrupt context.
11306  */
11307 static void
11308 fcp_pkt_teardown(struct scsi_pkt *pkt)
11309 {
11310 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11311 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11312 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11313 
11314 	/*
11315 	 * Remove the packet from the per-lun list
11316 	 */
11317 	mutex_enter(&plun->lun_mutex);
11318 	if (cmd->cmd_back) {
11319 		ASSERT(cmd != plun->lun_pkt_head);
11320 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11321 	} else {
11322 		ASSERT(cmd == plun->lun_pkt_head);
11323 		plun->lun_pkt_head = cmd->cmd_forw;
11324 	}
11325 
11326 	if (cmd->cmd_forw) {
11327 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11328 	} else {
11329 		ASSERT(cmd == plun->lun_pkt_tail);
11330 		plun->lun_pkt_tail = cmd->cmd_back;
11331 	}
11332 
11333 	mutex_exit(&plun->lun_mutex);
11334 
11335 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11336 }
11337 
11338 /*
11339  * Routine for reset notification setup, to register or cancel.
11340  * This function is called by SCSA
11341  */
11342 /*ARGSUSED*/
11343 static int
11344 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11345     void (*callback)(caddr_t), caddr_t arg)
11346 {
11347 	struct fcp_port *pptr = ADDR2FCP(ap);
11348 
11349 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11350 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11351 }
11352 
11353 
11354 static int
11355 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11356     ddi_eventcookie_t *event_cookiep)
11357 {
11358 	struct fcp_port *pptr = fcp_dip2port(dip);
11359 
11360 	if (pptr == NULL) {
11361 		return (DDI_FAILURE);
11362 	}
11363 
11364 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11365 	    event_cookiep, NDI_EVENT_NOPASS));
11366 }
11367 
11368 
11369 static int
11370 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11371     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11372     ddi_callback_id_t *cb_id)
11373 {
11374 	struct fcp_port *pptr = fcp_dip2port(dip);
11375 
11376 	if (pptr == NULL) {
11377 		return (DDI_FAILURE);
11378 	}
11379 
11380 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11381 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11382 }
11383 
11384 
11385 static int
11386 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11387 {
11388 
11389 	struct fcp_port *pptr = fcp_dip2port(dip);
11390 
11391 	if (pptr == NULL) {
11392 		return (DDI_FAILURE);
11393 	}
11394 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11395 }
11396 
11397 
11398 /*
11399  * called by the transport to post an event
11400  */
11401 static int
11402 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11403     ddi_eventcookie_t eventid, void *impldata)
11404 {
11405 	struct fcp_port *pptr = fcp_dip2port(dip);
11406 
11407 	if (pptr == NULL) {
11408 		return (DDI_FAILURE);
11409 	}
11410 
11411 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11412 	    eventid, impldata));
11413 }
11414 
11415 
11416 /*
 * In many cases a target in Fibre Channel has a one-to-one relation
 * with a port identifier (which is also known as the D_ID, and as the
 * AL_PA on a private loop).  On Fibre Channel-to-SCSI bridge boxes a
 * target reset will most likely reset all LUNs, which means a reset
 * will occur on all the SCSI devices connected at the other end of the
 * bridge.  That is a favorite topic for discussion: one can debate it
 * as hotly as one likes and come up with an arguably best solution to
 * one's own satisfaction.
 *
 * To stay on track and not digress much, here are the problems stated
 * briefly:
 *
 *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but
 *	target drivers use RESET_TARGET even if their instance is on a
 *	LUN.  Doesn't that sound a bit broken?
 *
 *	FCP SCSI (the current spec) only defines RESET TARGET in the
 *	control fields of an FCP_CMND structure.  It should have been
 *	fixed right there, giving the initiators the flexibility to
 *	minimize the havoc that could be caused by resetting a target.
11437  */
11438 static int
11439 fcp_reset_target(struct scsi_address *ap, int level)
11440 {
11441 	int			rval = FC_FAILURE;
11442 	char			lun_id[25];
11443 	struct fcp_port		*pptr = ADDR2FCP(ap);
11444 	struct fcp_lun	*plun = ADDR2LUN(ap);
11445 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11446 	struct scsi_pkt		*pkt;
11447 	struct fcp_pkt	*cmd;
11448 	struct fcp_rsp		*rsp;
11449 	uint32_t		tgt_cnt;
11450 	struct fcp_rsp_info	*rsp_info;
11451 	struct fcp_reset_elem	*p;
11452 	int			bval;
11453 
11454 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11455 	    KM_NOSLEEP)) == NULL) {
11456 		return (rval);
11457 	}
11458 
11459 	mutex_enter(&ptgt->tgt_mutex);
11460 	if (level == RESET_TARGET) {
11461 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11462 			mutex_exit(&ptgt->tgt_mutex);
11463 			kmem_free(p, sizeof (struct fcp_reset_elem));
11464 			return (rval);
11465 		}
11466 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11467 		(void) strcpy(lun_id, " ");
11468 	} else {
11469 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11470 			mutex_exit(&ptgt->tgt_mutex);
11471 			kmem_free(p, sizeof (struct fcp_reset_elem));
11472 			return (rval);
11473 		}
11474 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11475 
11476 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11477 	}
11478 	tgt_cnt = ptgt->tgt_change_cnt;
11479 
11480 	mutex_exit(&ptgt->tgt_mutex);
11481 
11482 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11483 	    0, 0, NULL, 0)) == NULL) {
11484 		kmem_free(p, sizeof (struct fcp_reset_elem));
11485 		mutex_enter(&ptgt->tgt_mutex);
11486 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11487 		mutex_exit(&ptgt->tgt_mutex);
11488 		return (rval);
11489 	}
11490 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11491 
11492 	/* fill in cmd part of packet */
11493 	cmd = PKT2CMD(pkt);
11494 	if (level == RESET_TARGET) {
11495 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11496 	} else {
11497 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11498 	}
11499 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11500 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11501 
11502 	/* prepare a packet for transport */
11503 	fcp_prepare_pkt(pptr, cmd, plun);
11504 
11505 	if (cmd->cmd_pkt->pkt_time) {
11506 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11507 	} else {
11508 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11509 	}
11510 
11511 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11512 	bval = fcp_dopoll(pptr, cmd);
11513 	fc_ulp_idle_port(pptr->port_fp_handle);
11514 
	/* the packet has been polled; now examine the result */
11516 	if (bval == TRAN_ACCEPT) {
11517 		int error = 3;
11518 
11519 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11520 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11521 		    sizeof (struct fcp_rsp));
11522 
11523 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11524 			if (fcp_validate_fcp_response(rsp, pptr) ==
11525 			    FC_SUCCESS) {
11526 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11527 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11528 					    sizeof (struct fcp_rsp), rsp_info,
11529 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11530 					    sizeof (struct fcp_rsp_info));
11531 				}
11532 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11533 					rval = FC_SUCCESS;
11534 					error = 0;
11535 				} else {
11536 					error = 1;
11537 				}
11538 			} else {
11539 				error = 2;
11540 			}
11541 		}
11542 
11543 		switch (error) {
11544 		case 0:
11545 			fcp_log(CE_WARN, pptr->port_dip,
11546 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11547 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11548 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11549 			break;
11550 
11551 		case 1:
11552 			fcp_log(CE_WARN, pptr->port_dip,
11553 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11554 			    " response code=%x",
11555 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11556 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11557 			    rsp_info->rsp_code);
11558 			break;
11559 
11560 		case 2:
11561 			fcp_log(CE_WARN, pptr->port_dip,
11562 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11563 			    " Bad FCP response values: rsvd1=%x,"
11564 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11565 			    " rsplen=%x, senselen=%x",
11566 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11567 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11568 			    rsp->reserved_0, rsp->reserved_1,
11569 			    rsp->fcp_u.fcp_status.reserved_0,
11570 			    rsp->fcp_u.fcp_status.reserved_1,
11571 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11572 			break;
11573 
11574 		default:
11575 			fcp_log(CE_WARN, pptr->port_dip,
11576 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11577 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11578 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11579 			break;
11580 		}
11581 	}
11582 	scsi_destroy_pkt(pkt);
11583 
11584 	if (rval == FC_FAILURE) {
11585 		mutex_enter(&ptgt->tgt_mutex);
11586 		if (level == RESET_TARGET) {
11587 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11588 		} else {
11589 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11590 		}
11591 		mutex_exit(&ptgt->tgt_mutex);
11592 		kmem_free(p, sizeof (struct fcp_reset_elem));
11593 		return (rval);
11594 	}
11595 
11596 	mutex_enter(&pptr->port_mutex);
11597 	if (level == RESET_TARGET) {
11598 		p->tgt = ptgt;
11599 		p->lun = NULL;
11600 	} else {
11601 		p->tgt = NULL;
11602 		p->lun = plun;
11603 	}
11605 	p->tgt_cnt = tgt_cnt;
11606 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11607 	p->next = pptr->port_reset_list;
11608 	pptr->port_reset_list = p;
11609 
11610 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11611 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11612 	    "Notify ssd of the reset to reinstate the reservations");
11613 
11614 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11615 	    &pptr->port_reset_notify_listf);
11616 
11617 	mutex_exit(&pptr->port_mutex);
11618 
11619 	return (rval);
11620 }
11621 
11622 
11623 /*
11624  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11625  * SCSI capabilities
11626  */
11627 /* ARGSUSED */
11628 static int
11629 fcp_commoncap(struct scsi_address *ap, char *cap,
11630     int val, int tgtonly, int doset)
11631 {
11632 	struct fcp_port		*pptr = ADDR2FCP(ap);
11633 	struct fcp_lun	*plun = ADDR2LUN(ap);
11634 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11635 	int			cidx;
11636 	int			rval = FALSE;
11637 
11638 	if (cap == (char *)0) {
11639 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11640 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11641 		    "fcp_commoncap: invalid arg");
11642 		return (rval);
11643 	}
11644 
11645 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11646 		return (UNDEFINED);
11647 	}
11648 
11649 	/*
11650 	 * Process setcap request.
11651 	 */
11652 	if (doset) {
11653 		/*
11654 		 * At present, we can only set binary (0/1) values
11655 		 */
11656 		switch (cidx) {
11657 		case SCSI_CAP_ARQ:
11658 			if (val == 0) {
11659 				rval = FALSE;
11660 			} else {
11661 				rval = TRUE;
11662 			}
11663 			break;
11664 
11665 		case SCSI_CAP_LUN_RESET:
11666 			if (val) {
11667 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11668 			} else {
11669 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11670 			}
11671 			rval = TRUE;
11672 			break;
11673 
11674 		case SCSI_CAP_SECTOR_SIZE:
11675 			rval = TRUE;
11676 			break;
11677 		default:
11678 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11679 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11680 			    "fcp_setcap: unsupported %d", cidx);
11681 			rval = UNDEFINED;
11682 			break;
11683 		}
11684 
11685 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11686 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11687 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11688 		    "0x%x/0x%x/0x%x/%d",
11689 		    cap, val, tgtonly, doset, rval);
11690 
11691 	} else {
11692 		/*
11693 		 * Process getcap request.
11694 		 */
11695 		switch (cidx) {
11696 		case SCSI_CAP_DMA_MAX:
11697 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11698 
11699 			/*
			 * Need to make an adjustment: qlc reports this as
			 * a uint64_t while st expects an int, so we make
			 * the adjustment here since nobody wants to touch
			 * the target drivers.  This still leaves a maximum
			 * single transfer length of 2 gig, which should
			 * be enough for now.
11705 			 */
11706 
11707 			if (rval == -1) {
11708 				rval = MAX_INT_DMA;
11709 			}
11710 
11711 			break;
11712 
11713 		case SCSI_CAP_INITIATOR_ID:
11714 			rval = pptr->port_id;
11715 			break;
11716 
11717 		case SCSI_CAP_ARQ:
11718 		case SCSI_CAP_RESET_NOTIFICATION:
11719 		case SCSI_CAP_TAGGED_QING:
11720 			rval = TRUE;
11721 			break;
11722 
11723 		case SCSI_CAP_SCSI_VERSION:
11724 			rval = 3;
11725 			break;
11726 
11727 		case SCSI_CAP_INTERCONNECT_TYPE:
11728 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11729 			    (ptgt->tgt_hard_addr == 0)) {
11730 				rval = INTERCONNECT_FABRIC;
11731 			} else {
11732 				rval = INTERCONNECT_FIBRE;
11733 			}
11734 			break;
11735 
11736 		case SCSI_CAP_LUN_RESET:
11737 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11738 			    TRUE : FALSE;
11739 			break;
11740 
11741 		default:
11742 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11743 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11744 			    "fcp_getcap: unsupported %d", cidx);
11745 			rval = UNDEFINED;
11746 			break;
11747 		}
11748 
11749 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11750 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11751 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11752 		    "0x%x/0x%x/0x%x/%d",
11753 		    cap, val, tgtonly, doset, rval);
11754 	}
11755 
11756 	return (rval);
11757 }
11758 
11759 /*
11760  * called by the transport to get the port-wwn and lun
11761  * properties of this device, and to create a "name" based on them
11762  *
11763  * these properties don't exist on sun4m
11764  *
11765  * return 1 for success else return 0
11766  */
11767 /* ARGSUSED */
11768 static int
11769 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11770 {
11771 	int			i;
11772 	int			*lun;
11773 	int			numChars;
11774 	uint_t			nlun;
11775 	uint_t			count;
11776 	uint_t			nbytes;
11777 	uchar_t			*bytes;
11778 	uint16_t		lun_num;
11779 	uint32_t		tgt_id;
11780 	char			**conf_wwn;
11781 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11782 	uchar_t			barray[FC_WWN_SIZE];
11783 	dev_info_t		*tgt_dip;
11784 	struct fcp_tgt	*ptgt;
11785 	struct fcp_port	*pptr;
11786 	struct fcp_lun	*plun;
11787 
11788 	ASSERT(sd != NULL);
11789 	ASSERT(name != NULL);
11790 
11791 	tgt_dip = sd->sd_dev;
11792 	pptr = ddi_get_soft_state(fcp_softstate,
11793 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11794 	if (pptr == NULL) {
11795 		return (0);
11796 	}
11797 
11798 	ASSERT(tgt_dip != NULL);
11799 
11800 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11801 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11802 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11803 		name[0] = '\0';
11804 		return (0);
11805 	}
11806 
11807 	if (nlun == 0) {
11808 		ddi_prop_free(lun);
11809 		return (0);
11810 	}
11811 
11812 	lun_num = lun[0];
11813 	ddi_prop_free(lun);
11814 
11815 	/*
11816 	 * Lookup for .conf WWN property
11817 	 */
11818 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11819 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11820 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11821 		ASSERT(count >= 1);
11822 
11823 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11824 		ddi_prop_free(conf_wwn);
11825 		mutex_enter(&pptr->port_mutex);
11826 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11827 			mutex_exit(&pptr->port_mutex);
11828 			return (0);
11829 		}
11830 		ptgt = plun->lun_tgt;
11831 		mutex_exit(&pptr->port_mutex);
11832 
11833 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11834 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11835 
11836 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11837 		    ptgt->tgt_hard_addr != 0) {
11838 			tgt_id = (uint32_t)fcp_alpa_to_switch[
11839 			    ptgt->tgt_hard_addr];
11840 		} else {
11841 			tgt_id = ptgt->tgt_d_id;
11842 		}
11843 
11844 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11845 		    TARGET_PROP, tgt_id);
11846 	}
11847 
	/* get our port-wwn property */
11849 	bytes = NULL;
11850 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11851 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11852 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11853 		if (bytes != NULL) {
11854 			ddi_prop_free(bytes);
11855 		}
11856 		return (0);
11857 	}
11858 
11859 	for (i = 0; i < FC_WWN_SIZE; i++) {
11860 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
11861 	}
11862 
11863 	/* Stick in the address of the form "wWWN,LUN" */
11864 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
11865 
11866 	ASSERT(numChars < len);
11867 	if (numChars >= len) {
11868 		fcp_log(CE_WARN, pptr->port_dip,
11869 		    "!fcp_scsi_get_name: "
11870 		    "name parameter length too small, it needs to be %d",
11871 		    numChars+1);
11872 	}
11873 
11874 	ddi_prop_free(bytes);
11875 
11876 	return (1);
11877 }
11878 
11879 
11880 /*
11881  * called by the transport to get the SCSI target id value, returning
11882  * it in "name"
11883  *
11884  * this isn't needed/used on sun4m
11885  *
11886  * return 1 for success else return 0
11887  */
11888 /* ARGSUSED */
11889 static int
11890 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
11891 {
11892 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
11893 	struct fcp_tgt	*ptgt;
11894 	int    numChars;
11895 
11896 	if (plun == NULL) {
11897 		return (0);
11898 	}
11899 
11900 	if ((ptgt = plun->lun_tgt) == NULL) {
11901 		return (0);
11902 	}
11903 
11904 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
11905 
11906 	ASSERT(numChars < len);
11907 	if (numChars >= len) {
11908 		fcp_log(CE_WARN, NULL,
11909 		    "!fcp_scsi_get_bus_addr: "
11910 		    "name parameter length too small, it needs to be %d",
11911 		    numChars+1);
11912 	}
11913 
11914 	return (1);
11915 }
11916 
11917 
11918 /*
11919  * called internally to reset the link where the specified port lives
11920  */
11921 static int
11922 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
11923 {
11924 	la_wwn_t		wwn;
11925 	struct fcp_lun	*plun;
11926 	struct fcp_tgt	*ptgt;
11927 
11928 	/* disable restart of lip if we're suspended */
11929 	mutex_enter(&pptr->port_mutex);
11930 
11931 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
11932 	    FCP_STATE_POWER_DOWN)) {
11933 		mutex_exit(&pptr->port_mutex);
11934 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11935 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
11936 		    "fcp_linkreset, fcp%d: link reset "
11937 		    "disabled due to DDI_SUSPEND",
11938 		    ddi_get_instance(pptr->port_dip));
11939 		return (FC_FAILURE);
11940 	}
11941 
11942 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
11943 		mutex_exit(&pptr->port_mutex);
11944 		return (FC_SUCCESS);
11945 	}
11946 
11947 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11948 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
11949 
11950 	/*
11951 	 * If ap == NULL assume local link reset.
11952 	 */
11953 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
11954 		plun = ADDR2LUN(ap);
11955 		ptgt = plun->lun_tgt;
11956 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
11957 	} else {
11958 		bzero((caddr_t)&wwn, sizeof (wwn));
11959 	}
11960 	mutex_exit(&pptr->port_mutex);
11961 
11962 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
11963 }
11964 
11965 
11966 /*
11967  * called from fcp_port_attach() to resume a port
11968  * return DDI_* success/failure status
11969  * acquires and releases the global mutex
11970  * acquires and releases the port mutex
11971  */
11972 /*ARGSUSED*/
11973 
11974 static int
11975 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
11976     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
11977 {
11978 	int			res = DDI_FAILURE; /* default result */
11979 	struct fcp_port	*pptr;		/* port state ptr */
11980 	uint32_t		alloc_cnt;
11981 	uint32_t		max_cnt;
11982 	fc_portmap_t		*tmp_list = NULL;
11983 
11984 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
11985 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
11986 	    instance);
11987 
11988 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
11989 		cmn_err(CE_WARN, "fcp: bad soft state");
11990 		return (res);
11991 	}
11992 
11993 	mutex_enter(&pptr->port_mutex);
11994 	switch (cmd) {
11995 	case FC_CMD_RESUME:
11996 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
11997 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
11998 		break;
11999 
12000 	case FC_CMD_POWER_UP:
12001 		/*
		 * If the port is DDI_SUSPENDed, defer rediscovery
		 * until DDI_RESUME occurs.
12004 		 */
12005 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12006 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12007 			mutex_exit(&pptr->port_mutex);
12008 			return (DDI_SUCCESS);
12009 		}
12010 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12011 	}
12012 	pptr->port_id = s_id;
12013 	pptr->port_state = FCP_STATE_INIT;
12014 	mutex_exit(&pptr->port_mutex);
12015 
12016 	/*
12017 	 * Make a copy of ulp_port_info as fctl allocates
12018 	 * a temp struct.
12019 	 */
12020 	(void) fcp_cp_pinfo(pptr, pinfo);
12021 
12022 	mutex_enter(&fcp_global_mutex);
12023 	if (fcp_watchdog_init++ == 0) {
12024 		fcp_watchdog_tick = fcp_watchdog_timeout *
12025 		    drv_usectohz(1000000);
12026 		fcp_watchdog_id = timeout(fcp_watch,
12027 		    NULL, fcp_watchdog_tick);
12028 	}
12029 	mutex_exit(&fcp_global_mutex);
12030 
12031 	/*
12032 	 * Handle various topologies and link states.
12033 	 */
12034 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12035 	case FC_STATE_OFFLINE:
12036 		/*
12037 		 * Wait for ONLINE, at which time a state
12038 		 * change will cause a statec_callback
12039 		 */
12040 		res = DDI_SUCCESS;
12041 		break;
12042 
12043 	case FC_STATE_ONLINE:
12044 
12045 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12046 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12047 			res = DDI_SUCCESS;
12048 			break;
12049 		}
12050 
12051 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12052 		    !fcp_enable_auto_configuration) {
12053 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12054 			if (tmp_list == NULL) {
12055 				if (!alloc_cnt) {
12056 					res = DDI_SUCCESS;
12057 				}
12058 				break;
12059 			}
12060 			max_cnt = alloc_cnt;
12061 		} else {
12062 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12063 
12064 			alloc_cnt = FCP_MAX_DEVICES;
12065 
12066 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12067 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12068 			    KM_NOSLEEP)) == NULL) {
12069 				fcp_log(CE_WARN, pptr->port_dip,
12070 				    "!fcp%d: failed to allocate portmap",
12071 				    instance);
12072 				break;
12073 			}
12074 
12075 			max_cnt = alloc_cnt;
12076 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12077 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12078 			    FC_SUCCESS) {
12079 				caddr_t msg;
12080 
12081 				(void) fc_ulp_error(res, &msg);
12082 
12083 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12084 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12085 				    "resume failed getportmap: reason=0x%x",
12086 				    res);
12087 
12088 				fcp_log(CE_WARN, pptr->port_dip,
12089 				    "!failed to get port map : %s", msg);
12090 				break;
12091 			}
12092 			if (max_cnt > alloc_cnt) {
12093 				alloc_cnt = max_cnt;
12094 			}
12095 		}
12096 
12097 		/*
12098 		 * do the SCSI device discovery and create
12099 		 * the devinfos
12100 		 */
12101 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12102 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12103 		    max_cnt, pptr->port_id);
12104 
12105 		res = DDI_SUCCESS;
12106 		break;
12107 
12108 	default:
12109 		fcp_log(CE_WARN, pptr->port_dip,
12110 		    "!fcp%d: invalid port state at attach=0x%x",
12111 		    instance, pptr->port_phys_state);
12112 
12113 		mutex_enter(&pptr->port_mutex);
12114 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12115 		mutex_exit(&pptr->port_mutex);
12116 		res = DDI_SUCCESS;
12117 
12118 		break;
12119 	}
12120 
12121 	if (tmp_list != NULL) {
12122 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12123 	}
12124 
12125 	return (res);
12126 }
12127 
12128 
12129 static void
12130 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12131 {
12132 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12133 	pptr->port_dip = pinfo->port_dip;
12134 	pptr->port_fp_handle = pinfo->port_handle;
12135 	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12136 	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12137 	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12138 	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12139 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12140 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12141 	pptr->port_phys_state = pinfo->port_state;
12142 	pptr->port_topology = pinfo->port_flags;
12143 	pptr->port_reset_action = pinfo->port_reset_action;
12144 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12145 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12146 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12147 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12148 }
12149 
12150 /*
 * If the element's wait field is set to 1 then
 * another thread is waiting for the operation to complete.  Once
 * it is complete, the waiting thread is signaled and the element is
 * freed by the waiting thread.  If the element's wait field is set to 0
 * the element is freed.
12156  */
12157 static void
12158 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12159 {
12160 	ASSERT(elem != NULL);
12161 	mutex_enter(&elem->mutex);
12162 	elem->result = result;
12163 	if (elem->wait) {
12164 		elem->wait = 0;
12165 		cv_signal(&elem->cv);
12166 		mutex_exit(&elem->mutex);
12167 	} else {
12168 		mutex_exit(&elem->mutex);
12169 		cv_destroy(&elem->cv);
12170 		mutex_destroy(&elem->mutex);
12171 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12172 	}
12173 }
12174 
12175 /*
12176  * This function is invoked from the taskq thread to allocate
12177  * devinfo nodes and to online/offline them.
12178  */
12179 static void
12180 fcp_hp_task(void *arg)
12181 {
12182 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12183 	struct fcp_lun	*plun = elem->lun;
12184 	struct fcp_port		*pptr = elem->port;
12185 	int			result;
12186 
12187 	ASSERT(elem->what == FCP_ONLINE ||
12188 	    elem->what == FCP_OFFLINE ||
12189 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12190 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12191 
12192 	mutex_enter(&pptr->port_mutex);
12193 	mutex_enter(&plun->lun_mutex);
12194 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12195 	    plun->lun_event_count != elem->event_cnt) ||
12196 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12197 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12198 		mutex_exit(&plun->lun_mutex);
12199 		mutex_exit(&pptr->port_mutex);
12200 		fcp_process_elem(elem, NDI_FAILURE);
12201 		return;
12202 	}
12203 	mutex_exit(&plun->lun_mutex);
12204 	mutex_exit(&pptr->port_mutex);
12205 
12206 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12207 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12208 	fcp_process_elem(elem, result);
12209 }
12210 
12211 
12212 static child_info_t *
12213 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12214     int tcount)
12215 {
12216 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12217 
12218 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12219 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12220 
12221 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12222 		/*
12223 		 * Child has not been created yet. Create the child device
12224 		 * based on the per-Lun flags.
12225 		 */
12226 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12227 			plun->lun_cip =
12228 			    CIP(fcp_create_dip(plun, lcount, tcount));
12229 			plun->lun_mpxio = 0;
12230 		} else {
12231 			plun->lun_cip =
12232 			    CIP(fcp_create_pip(plun, lcount, tcount));
12233 			plun->lun_mpxio = 1;
12234 		}
12235 	} else {
12236 		plun->lun_cip = cip;
12237 	}
12238 
12239 	return (plun->lun_cip);
12240 }
12241 
12242 
12243 static int
12244 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12245 {
12246 	int		rval = FC_FAILURE;
12247 	dev_info_t	*pdip;
12248 	struct dev_info	*dip;
12249 	int		circular;
12250 
12251 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12252 
12253 	pdip = plun->lun_tgt->tgt_port->port_dip;
12254 
12255 	if (plun->lun_cip == NULL) {
12256 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12257 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12258 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12259 		    "plun: %p lun state: %x num: %d target state: %x",
12260 		    plun, plun->lun_state, plun->lun_num,
12261 		    plun->lun_tgt->tgt_port->port_state);
12262 		return (rval);
12263 	}
12264 	ndi_devi_enter(pdip, &circular);
12265 	dip = DEVI(pdip)->devi_child;
12266 	while (dip) {
12267 		if (dip == DEVI(cdip)) {
12268 			rval = FC_SUCCESS;
12269 			break;
12270 		}
12271 		dip = dip->devi_sibling;
12272 	}
12273 	ndi_devi_exit(pdip, circular);
12274 	return (rval);
12275 }
12276 
12277 static int
12278 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12279 {
12280 	int		rval = FC_FAILURE;
12281 
12282 	ASSERT(plun != NULL);
12283 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12284 
12285 	if (plun->lun_mpxio == 0) {
12286 		rval = fcp_is_dip_present(plun, DIP(cip));
12287 	} else {
12288 		rval = fcp_is_pip_present(plun, PIP(cip));
12289 	}
12290 
12291 	return (rval);
12292 }
12293 
12294 /*
12295  *     Function: fcp_create_dip
12296  *
12297  *  Description: Creates a dev_info_t structure for the LUN specified by the
12298  *		 caller.
12299  *
12300  *     Argument: plun		Lun structure
12301  *		 link_cnt	Link state count.
12302  *		 tgt_cnt	Target state change count.
12303  *
12304  * Return Value: NULL if it failed
12305  *		 dev_info_t structure address if it succeeded
12306  *
12307  *	Context: Kernel context
12308  */
12309 static dev_info_t *
12310 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12311 {
12312 	int			failure = 0;
12313 	uint32_t		tgt_id;
12314 	uint64_t		sam_lun;
12315 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12316 	struct fcp_port	*pptr = ptgt->tgt_port;
12317 	dev_info_t		*pdip = pptr->port_dip;
12318 	dev_info_t		*cdip = NULL;
12319 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12320 	char			*nname = NULL;
12321 	char			**compatible = NULL;
12322 	int			ncompatible;
12323 	char			*scsi_binding_set;
12324 	char			t_pwwn[17];
12325 
12326 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12327 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12328 
12329 	/* get the 'scsi-binding-set' property */
12330 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12331 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12332 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12333 		scsi_binding_set = NULL;
12334 	}
12335 
12336 	/* determine the node name and compatible */
12337 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12338 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12339 	if (scsi_binding_set) {
12340 		ddi_prop_free(scsi_binding_set);
12341 	}
12342 
12343 	if (nname == NULL) {
12344 #ifdef	DEBUG
12345 		cmn_err(CE_WARN, "%s%d: no driver for "
12346 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12347 		    "	 compatible: %s",
12348 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12349 		    ptgt->tgt_port_wwn.raw_wwn[0],
12350 		    ptgt->tgt_port_wwn.raw_wwn[1],
12351 		    ptgt->tgt_port_wwn.raw_wwn[2],
12352 		    ptgt->tgt_port_wwn.raw_wwn[3],
12353 		    ptgt->tgt_port_wwn.raw_wwn[4],
12354 		    ptgt->tgt_port_wwn.raw_wwn[5],
12355 		    ptgt->tgt_port_wwn.raw_wwn[6],
12356 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12357 		    *compatible);
12358 #endif	/* DEBUG */
12359 		failure++;
12360 		goto end_of_fcp_create_dip;
12361 	}
12362 
12363 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12364 
12365 	/*
	 * If the old_dip does not match the cdip, that means there is
	 * some property change.  Since we'll be using the cdip, we need
	 * to offline the old_dip.  If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated.  Offline the
	 * old device and create a new device with the new device type.
	 * Refer to bug 4764752.
12372 	 */
12373 	if (old_dip && (cdip != old_dip ||
12374 	    plun->lun_state & FCP_LUN_CHANGED)) {
12375 		plun->lun_state &= ~(FCP_LUN_INIT);
12376 		mutex_exit(&plun->lun_mutex);
12377 		mutex_exit(&pptr->port_mutex);
12378 
12379 		mutex_enter(&ptgt->tgt_mutex);
12380 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12381 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12382 		mutex_exit(&ptgt->tgt_mutex);
12383 
12384 #ifdef DEBUG
12385 		if (cdip != NULL) {
12386 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12387 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12388 			    "Old dip=%p; New dip=%p don't match", old_dip,
12389 			    cdip);
12390 		} else {
12391 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12392 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12393 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12394 		}
12395 #endif
12396 
12397 		mutex_enter(&pptr->port_mutex);
12398 		mutex_enter(&plun->lun_mutex);
12399 	}
12400 
12401 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12402 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12403 		if (ndi_devi_alloc(pptr->port_dip, nname,
12404 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12405 			failure++;
12406 			goto end_of_fcp_create_dip;
12407 		}
12408 	}
12409 
12410 	/*
12411 	 * Previously all the properties for the devinfo were destroyed here
12412 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12413 	 * the devid property (and other properties established by the target
12414 	 * driver or framework) which the code does not always recreate, this
12415 	 * call was removed.
12416 	 * This opens a theoretical possibility that we may return with a
12417 	 * stale devid on the node if the scsi entity behind the fibre channel
12418 	 * lun has changed.
12419 	 */
12420 
12421 	/* decorate the node with compatible */
12422 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12423 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12424 		failure++;
12425 		goto end_of_fcp_create_dip;
12426 	}
12427 
12428 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12429 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12430 		failure++;
12431 		goto end_of_fcp_create_dip;
12432 	}
12433 
12434 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12435 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12436 		failure++;
12437 		goto end_of_fcp_create_dip;
12438 	}
12439 
12440 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12441 	t_pwwn[16] = '\0';
12442 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12443 	    != DDI_PROP_SUCCESS) {
12444 		failure++;
12445 		goto end_of_fcp_create_dip;
12446 	}
12447 
12448 	/*
	 * If there is no hard address, we might have to deal with
	 * that by using the WWN.  Having said that, it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
12453 	 */
12454 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12455 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12456 	} else {
12457 		tgt_id = ptgt->tgt_d_id;
12458 	}
12459 
12460 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12461 	    tgt_id) != DDI_PROP_SUCCESS) {
12462 		failure++;
12463 		goto end_of_fcp_create_dip;
12464 	}
12465 
12466 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12467 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12468 		failure++;
12469 		goto end_of_fcp_create_dip;
12470 	}
12471 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12472 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12473 	    sam_lun) != DDI_PROP_SUCCESS) {
12474 		failure++;
12475 		goto end_of_fcp_create_dip;
12476 	}
12477 
12478 end_of_fcp_create_dip:
12479 	scsi_hba_nodename_compatible_free(nname, compatible);
12480 
12481 	if (cdip != NULL && failure) {
12482 		(void) ndi_prop_remove_all(cdip);
12483 		(void) ndi_devi_free(cdip);
12484 		cdip = NULL;
12485 	}
12486 
12487 	return (cdip);
12488 }
12489 
12490 /*
12491  *     Function: fcp_create_pip
12492  *
12493  *  Description: Creates a Path Id for the LUN specified by the caller.
12494  *
12495  *     Argument: plun		Lun structure
12496  *		 link_cnt	Link state count.
12497  *		 tgt_cnt	Target state count.
12498  *
12499  * Return Value: NULL if it failed
12500  *		 mdi_pathinfo_t structure address if it succeeded
12501  *
12502  *	Context: Kernel context
12503  */
12504 static mdi_pathinfo_t *
12505 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12506 {
12507 	int			i;
12508 	char			buf[MAXNAMELEN];
12509 	char			uaddr[MAXNAMELEN];
12510 	int			failure = 0;
12511 	uint32_t		tgt_id;
12512 	uint64_t		sam_lun;
12513 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12514 	struct fcp_port	*pptr = ptgt->tgt_port;
12515 	dev_info_t		*pdip = pptr->port_dip;
12516 	mdi_pathinfo_t		*pip = NULL;
12517 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12518 	char			*nname = NULL;
12519 	char			**compatible = NULL;
12520 	int			ncompatible;
12521 	char			*scsi_binding_set;
12522 	char			t_pwwn[17];
12523 
12524 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12525 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12526 
12527 	scsi_binding_set = "vhci";
12528 
12529 	/* determine the node name and compatible */
12530 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12531 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12532 
12533 	if (nname == NULL) {
12534 #ifdef	DEBUG
		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12536 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12537 		    "	 compatible: %s",
12538 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12539 		    ptgt->tgt_port_wwn.raw_wwn[0],
12540 		    ptgt->tgt_port_wwn.raw_wwn[1],
12541 		    ptgt->tgt_port_wwn.raw_wwn[2],
12542 		    ptgt->tgt_port_wwn.raw_wwn[3],
12543 		    ptgt->tgt_port_wwn.raw_wwn[4],
12544 		    ptgt->tgt_port_wwn.raw_wwn[5],
12545 		    ptgt->tgt_port_wwn.raw_wwn[6],
12546 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12547 		    *compatible);
12548 #endif	/* DEBUG */
12549 		failure++;
12550 		goto end_of_fcp_create_pip;
12551 	}
12552 
12553 	pip = fcp_find_existing_pip(plun, pdip);
12554 
	/*
	 * If the old_pip does not match the pip, some property has
	 * changed.  Since we will be using the pip, the old_pip must be
	 * offlined.  If the LUN state contains FCP_LUN_CHANGED, the dtype
	 * for the device has been updated; offline the old path and
	 * create a new one with the new device type.
	 * Refer to bug: 4764752
	 */
12563 	if (old_pip && (pip != old_pip ||
12564 	    plun->lun_state & FCP_LUN_CHANGED)) {
12565 		plun->lun_state &= ~(FCP_LUN_INIT);
12566 		mutex_exit(&plun->lun_mutex);
12567 		mutex_exit(&pptr->port_mutex);
12568 
12569 		mutex_enter(&ptgt->tgt_mutex);
12570 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12571 		    FCP_OFFLINE, lcount, tcount,
12572 		    NDI_DEVI_REMOVE, 0);
12573 		mutex_exit(&ptgt->tgt_mutex);
12574 
12575 		if (pip != NULL) {
12576 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12577 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12578 			    "Old pip=%p; New pip=%p don't match",
12579 			    old_pip, pip);
12580 		} else {
12581 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12582 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12583 			    "Old pip=%p; New pip=NULL don't match",
12584 			    old_pip);
12585 		}
12586 
12587 		mutex_enter(&pptr->port_mutex);
12588 		mutex_enter(&plun->lun_mutex);
12589 	}
12590 
	/*
	 * Since FC_WWN_SIZE is 8 bytes and, unlike lun_guid_size, does not
	 * depend on the target, the same truncation should not happen here
	 * unless the standards change FC_WWN_SIZE to something larger than
	 * MAXNAMELEN (currently 255 bytes).
	 */
12598 
12599 	for (i = 0; i < FC_WWN_SIZE; i++) {
12600 		(void) sprintf(&buf[i << 1], "%02x",
12601 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12602 	}
12603 
12604 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12605 	    buf, plun->lun_num);
12606 
12607 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12608 		/*
12609 		 * Release the locks before calling into
12610 		 * mdi_pi_alloc_compatible() since this can result in a
12611 		 * callback into fcp which can result in a deadlock
12612 		 * (see bug # 4870272).
12613 		 *
12614 		 * Basically, what we are trying to avoid is the scenario where
12615 		 * one thread does ndi_devi_enter() and tries to grab
12616 		 * fcp_mutex and another does it the other way round.
12617 		 *
12618 		 * But before we do that, make sure that nobody releases the
12619 		 * port in the meantime. We can do this by setting a flag.
12620 		 */
12621 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12622 		pptr->port_state |= FCP_STATE_IN_MDI;
12623 		mutex_exit(&plun->lun_mutex);
12624 		mutex_exit(&pptr->port_mutex);
12625 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12626 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12627 			fcp_log(CE_WARN, pptr->port_dip,
			    "!path alloc failed: %p", (void *)plun);
12629 			mutex_enter(&pptr->port_mutex);
12630 			mutex_enter(&plun->lun_mutex);
12631 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12632 			failure++;
12633 			goto end_of_fcp_create_pip;
12634 		}
12635 		mutex_enter(&pptr->port_mutex);
12636 		mutex_enter(&plun->lun_mutex);
12637 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12638 	} else {
12639 		(void) mdi_prop_remove(pip, NULL);
12640 	}
12641 
12642 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12643 
12644 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12645 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12646 	    != DDI_PROP_SUCCESS) {
12647 		failure++;
12648 		goto end_of_fcp_create_pip;
12649 	}
12650 
12651 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12652 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12653 	    != DDI_PROP_SUCCESS) {
12654 		failure++;
12655 		goto end_of_fcp_create_pip;
12656 	}
12657 
12658 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12659 	t_pwwn[16] = '\0';
12660 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12661 	    != DDI_PROP_SUCCESS) {
12662 		failure++;
12663 		goto end_of_fcp_create_pip;
12664 	}
12665 
	/*
	 * If there is no hard address, we might have to deal with that
	 * by using the WWN.  Having said that, it is important to
	 * recognize this problem early so ssd can be informed of the
	 * right interconnect type.
	 */
12672 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12673 	    ptgt->tgt_hard_addr != 0) {
12674 		tgt_id = (uint32_t)
12675 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12676 	} else {
12677 		tgt_id = ptgt->tgt_d_id;
12678 	}
12679 
12680 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12681 	    != DDI_PROP_SUCCESS) {
12682 		failure++;
12683 		goto end_of_fcp_create_pip;
12684 	}
12685 
12686 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12687 	    != DDI_PROP_SUCCESS) {
12688 		failure++;
12689 		goto end_of_fcp_create_pip;
12690 	}
12691 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12692 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12693 	    != DDI_PROP_SUCCESS) {
12694 		failure++;
12695 		goto end_of_fcp_create_pip;
12696 	}
12697 
12698 end_of_fcp_create_pip:
12699 	scsi_hba_nodename_compatible_free(nname, compatible);
12700 
12701 	if (pip != NULL && failure) {
12702 		(void) mdi_prop_remove(pip, NULL);
12703 		mutex_exit(&plun->lun_mutex);
12704 		mutex_exit(&pptr->port_mutex);
12705 		(void) mdi_pi_free(pip, 0);
12706 		mutex_enter(&pptr->port_mutex);
12707 		mutex_enter(&plun->lun_mutex);
12708 		pip = NULL;
12709 	}
12710 
12711 	return (pip);
12712 }
12713 
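/*
 * walk the children of the given parent devinfo node looking for an
 * existing child whose node name and node-wwn, port-wwn, target and
 * lun properties all match the LUN passed in
 *
 * returns the matching child dev_info_t, or NULL if none is found
 */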
12714 static dev_info_t *
12715 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12716 {
12717 	uint_t			nbytes;
12718 	uchar_t			*bytes;
12719 	uint_t			nwords;
12720 	uint32_t		tgt_id;
12721 	int			*words;
12722 	dev_info_t		*cdip;
12723 	dev_info_t		*ndip;
12724 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12725 	struct fcp_port	*pptr = ptgt->tgt_port;
12726 	int			circular;
12727 
12728 	ndi_devi_enter(pdip, &circular);
12729 
12730 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12731 	while ((cdip = ndip) != NULL) {
12732 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12733 
12734 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12735 			continue;
12736 		}
12737 
12738 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12739 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12740 		    &nbytes) != DDI_PROP_SUCCESS) {
12741 			continue;
12742 		}
12743 
12744 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12745 			if (bytes != NULL) {
12746 				ddi_prop_free(bytes);
12747 			}
12748 			continue;
12749 		}
12750 		ASSERT(bytes != NULL);
12751 
12752 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12753 			ddi_prop_free(bytes);
12754 			continue;
12755 		}
12756 
12757 		ddi_prop_free(bytes);
12758 
12759 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12760 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12761 		    &nbytes) != DDI_PROP_SUCCESS) {
12762 			continue;
12763 		}
12764 
12765 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12766 			if (bytes != NULL) {
12767 				ddi_prop_free(bytes);
12768 			}
12769 			continue;
12770 		}
12771 		ASSERT(bytes != NULL);
12772 
12773 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12774 			ddi_prop_free(bytes);
12775 			continue;
12776 		}
12777 
12778 		ddi_prop_free(bytes);
12779 
12780 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12781 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12782 		    &nwords) != DDI_PROP_SUCCESS) {
12783 			continue;
12784 		}
12785 
12786 		if (nwords != 1 || words == NULL) {
12787 			if (words != NULL) {
12788 				ddi_prop_free(words);
12789 			}
12790 			continue;
12791 		}
12792 		ASSERT(words != NULL);
12793 
		/*
		 * If there is no hard address, we might have to deal with
		 * that by using the WWN.  Having said that, it is important
		 * to recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
12800 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12801 		    ptgt->tgt_hard_addr != 0) {
12802 			tgt_id =
12803 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12804 		} else {
12805 			tgt_id = ptgt->tgt_d_id;
12806 		}
12807 
12808 		if (tgt_id != (uint32_t)*words) {
12809 			ddi_prop_free(words);
12810 			continue;
12811 		}
12812 		ddi_prop_free(words);
12813 
12814 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12815 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12816 		    &nwords) != DDI_PROP_SUCCESS) {
12817 			continue;
12818 		}
12819 
12820 		if (nwords != 1 || words == NULL) {
12821 			if (words != NULL) {
12822 				ddi_prop_free(words);
12823 			}
12824 			continue;
12825 		}
12826 		ASSERT(words != NULL);
12827 
12828 		if (plun->lun_num == (uint16_t)*words) {
12829 			ddi_prop_free(words);
12830 			break;
12831 		}
12832 		ddi_prop_free(words);
12833 	}
12834 	ndi_devi_exit(pdip, circular);
12835 
12836 	return (cdip);
12837 }
12838 
12839 
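/*
 * check whether the given mdi_pathinfo node is still known to MDI for
 * this LUN, using the old LUN guid if one is recorded, else the current
 * guid
 *
 * returns FC_SUCCESS if the path is present, FC_FAILURE otherwise
 *
 * called with the LUN mutex held
 */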
12840 static int
12841 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12842 {
12843 	dev_info_t	*pdip;
12844 	char		buf[MAXNAMELEN];
12845 	char		uaddr[MAXNAMELEN];
12846 	int		rval = FC_FAILURE;
12847 
12848 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12849 
12850 	pdip = plun->lun_tgt->tgt_port->port_dip;
12851 
	/*
	 * Check whether pip (and not plun->lun_cip) is NULL.  plun->lun_cip
	 * can be non-NULL even when the LUN is no longer there, as in the
	 * case where a LUN is configured and then deleted on the device end
	 * (the T3/T4 case).  In such cases, pip will be NULL.
	 *
	 * If the device generates an RSCN, the LUN gets offlined when it
	 * disappears and a new LUN is created when it is rediscovered on
	 * the device.  If we checked lun_cip here, the LUN would not end up
	 * getting onlined since this function would return FC_SUCCESS.
	 *
	 * The behavior is different on other devices.  For instance, on an
	 * HDS there was no RSCN generated by the device, but the next I/O
	 * generated a check condition and rediscovery was triggered that
	 * way.  In such cases, this path is not exercised.
	 */
12869 	if (pip == NULL) {
12870 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12871 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
12872 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
12873 		    "plun: %p lun state: %x num: %d target state: %x",
12874 		    plun, plun->lun_state, plun->lun_num,
12875 		    plun->lun_tgt->tgt_port->port_state);
12876 		return (rval);
12877 	}
12878 
12879 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
12880 
12881 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12882 
12883 	if (plun->lun_old_guid) {
12884 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
12885 			rval = FC_SUCCESS;
12886 		}
12887 	} else {
12888 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
12889 			rval = FC_SUCCESS;
12890 		}
12891 	}
12892 	return (rval);
12893 }
12894 
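/*
 * look up the mdi_pathinfo node previously created for this LUN under
 * the given pHCI devinfo node, using the LUN guid and the unit address
 * built from the target port WWN and LUN number
 *
 * returns the mdi_pathinfo_t pointer, or NULL if none exists
 *
 * called with the port mutex held
 */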
12895 static mdi_pathinfo_t *
12896 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
12897 {
12898 	char			buf[MAXNAMELEN];
12899 	char			uaddr[MAXNAMELEN];
12900 	mdi_pathinfo_t		*pip;
12901 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12902 	struct fcp_port	*pptr = ptgt->tgt_port;
12903 
12904 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12905 
12906 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
12907 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12908 
12909 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
12910 
12911 	return (pip);
12912 }
12913 
12914 
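/*
 * online the child node of a LUN: the devinfo node in the non-MPxIO
 * case, the mdi_pathinfo node otherwise; if MPxIO reports the device
 * as not supported, fall back to creating and onlining a devinfo node
 *
 * returns NDI_SUCCESS or NDI_FAILURE
 *
 * called with the port mutex and the LUN mutex held; both are dropped
 * and re-acquired internally
 */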
12915 static int
12916 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
12917     int tcount, int flags, int *circ)
12918 {
12919 	int			rval;
12920 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
12921 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12922 	dev_info_t		*cdip = NULL;
12923 
12924 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12925 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12926 
12927 	if (plun->lun_cip == NULL) {
12928 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12929 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12930 		    "fcp_online_child: plun->lun_cip is NULL: "
12931 		    "plun: %p state: %x num: %d target state: %x",
12932 		    plun, plun->lun_state, plun->lun_num,
12933 		    plun->lun_tgt->tgt_port->port_state);
12934 		return (NDI_FAILURE);
12935 	}
12936 again:
12937 	if (plun->lun_mpxio == 0) {
12938 		cdip = DIP(cip);
12939 		mutex_exit(&plun->lun_mutex);
12940 		mutex_exit(&pptr->port_mutex);
12941 
12942 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12943 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12944 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
12945 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12946 
		/*
		 * We could check for FCP_LUN_INIT here, but the chances
		 * of getting here when it's already in FCP_LUN_INIT are
		 * rare, and a duplicate ndi_devi_online wouldn't hurt
		 * either (the node would already be in CF2).
		 */
12954 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
12955 			rval = ndi_devi_bind_driver(cdip, flags);
12956 		} else {
12957 			rval = ndi_devi_online(cdip, flags);
12958 		}
		/*
		 * We log the message to the trace buffer if the device
		 * is "ses" and to syslog for any other device type.
		 * This prevents the ndi_devi_online failure message that
		 * would otherwise appear for V880/A5K ses devices.
		 */
12965 		if (rval == NDI_SUCCESS) {
12966 			mutex_enter(&ptgt->tgt_mutex);
12967 			plun->lun_state |= FCP_LUN_INIT;
12968 			mutex_exit(&ptgt->tgt_mutex);
12969 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
12970 			fcp_log(CE_NOTE, pptr->port_dip,
12971 			    "!ndi_devi_online:"
12972 			    " failed for %s: target=%x lun=%x %x",
12973 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12974 			    plun->lun_num, rval);
12975 		} else {
12976 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12977 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
12978 			    " !ndi_devi_online:"
12979 			    " failed for %s: target=%x lun=%x %x",
12980 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12981 			    plun->lun_num, rval);
12982 		}
12983 	} else {
12984 		cdip = mdi_pi_get_client(PIP(cip));
12985 		mutex_exit(&plun->lun_mutex);
12986 		mutex_exit(&pptr->port_mutex);
12987 
12988 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12989 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12990 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
12991 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12992 
12993 		/*
12994 		 * Hold path and exit phci to avoid deadlock with power
12995 		 * management code during mdi_pi_online.
12996 		 */
12997 		mdi_hold_path(PIP(cip));
12998 		mdi_devi_exit_phci(pptr->port_dip, *circ);
12999 
13000 		rval = mdi_pi_online(PIP(cip), flags);
13001 
13002 		mdi_devi_enter_phci(pptr->port_dip, circ);
13003 		mdi_rele_path(PIP(cip));
13004 
13005 		if (rval == MDI_SUCCESS) {
13006 			mutex_enter(&ptgt->tgt_mutex);
13007 			plun->lun_state |= FCP_LUN_INIT;
13008 			mutex_exit(&ptgt->tgt_mutex);
13009 
13010 			/*
13011 			 * Clear MPxIO path permanent disable in case
13012 			 * fcp hotplug dropped the offline event.
13013 			 */
13014 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13015 
13016 		} else if (rval == MDI_NOT_SUPPORTED) {
13017 			child_info_t	*old_cip = cip;
13018 
13019 			/*
13020 			 * MPxIO does not support this device yet.
13021 			 * Enumerate in legacy mode.
13022 			 */
13023 			mutex_enter(&pptr->port_mutex);
13024 			mutex_enter(&plun->lun_mutex);
13025 			plun->lun_mpxio = 0;
13026 			plun->lun_cip = NULL;
13027 			cdip = fcp_create_dip(plun, lcount, tcount);
13028 			plun->lun_cip = cip = CIP(cdip);
13029 			if (cip == NULL) {
13030 				fcp_log(CE_WARN, pptr->port_dip,
13031 				    "!fcp_online_child: "
13032 				    "Create devinfo failed for LU=%p", plun);
13033 				mutex_exit(&plun->lun_mutex);
13034 
13035 				mutex_enter(&ptgt->tgt_mutex);
13036 				plun->lun_state |= FCP_LUN_OFFLINE;
13037 				mutex_exit(&ptgt->tgt_mutex);
13038 
13039 				mutex_exit(&pptr->port_mutex);
13040 
13041 				/*
13042 				 * free the mdi_pathinfo node
13043 				 */
13044 				(void) mdi_pi_free(PIP(old_cip), 0);
13045 			} else {
13046 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13047 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13048 				    "fcp_online_child: creating devinfo "
13049 				    "node 0x%p for plun 0x%p",
13050 				    cip, plun);
13051 				mutex_exit(&plun->lun_mutex);
13052 				mutex_exit(&pptr->port_mutex);
13053 				/*
13054 				 * free the mdi_pathinfo node
13055 				 */
13056 				(void) mdi_pi_free(PIP(old_cip), 0);
13057 				mutex_enter(&pptr->port_mutex);
13058 				mutex_enter(&plun->lun_mutex);
13059 				goto again;
13060 			}
13061 		} else {
13062 			if (cdip) {
13063 				fcp_log(CE_NOTE, pptr->port_dip,
13064 				    "!fcp_online_child: mdi_pi_online:"
13065 				    " failed for %s: target=%x lun=%x %x",
13066 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13067 				    plun->lun_num, rval);
13068 			}
13069 		}
13070 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13071 	}
13072 
13073 	if (rval == NDI_SUCCESS) {
13074 		if (cdip) {
13075 			(void) ndi_event_retrieve_cookie(
13076 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13077 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13078 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13079 			    cdip, fcp_insert_eid, NULL);
13080 		}
13081 	}
13082 	mutex_enter(&pptr->port_mutex);
13083 	mutex_enter(&plun->lun_mutex);
13084 	return (rval);
13085 }
13086 
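/*
 * offline the child node of a LUN: the devinfo node in the non-MPxIO
 * case, the mdi_pathinfo node otherwise; clears FCP_LUN_INIT and, when
 * NDI_DEVI_REMOVE is set, drops the LUN's reference to the child
 *
 * returns NDI_SUCCESS or NDI_FAILURE
 *
 * called with the port mutex and the LUN mutex held; both are dropped
 * and re-acquired internally
 */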
13087 /* ARGSUSED */
13088 static int
13089 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13090     int tcount, int flags, int *circ)
13091 {
13092 	int rval;
13093 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13094 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13095 	dev_info_t		*cdip;
13096 
13097 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13098 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13099 
13100 	if (plun->lun_cip == NULL) {
13101 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13102 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13103 		    "fcp_offline_child: plun->lun_cip is NULL: "
13104 		    "plun: %p lun state: %x num: %d target state: %x",
13105 		    plun, plun->lun_state, plun->lun_num,
13106 		    plun->lun_tgt->tgt_port->port_state);
13107 		return (NDI_FAILURE);
13108 	}
13109 
13110 	if (plun->lun_mpxio == 0) {
13111 		cdip = DIP(cip);
13112 		mutex_exit(&plun->lun_mutex);
13113 		mutex_exit(&pptr->port_mutex);
13114 		rval = ndi_devi_offline(DIP(cip), flags);
13115 		if (rval != NDI_SUCCESS) {
13116 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13117 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13118 			    "fcp_offline_child: ndi_devi_offline failed "
13119 			    "rval=%x cip=%p", rval, cip);
13120 		}
13121 	} else {
13122 		cdip = mdi_pi_get_client(PIP(cip));
13123 		mutex_exit(&plun->lun_mutex);
13124 		mutex_exit(&pptr->port_mutex);
13125 
13126 		/*
13127 		 * Exit phci to avoid deadlock with power management code
13128 		 * during mdi_pi_offline
13129 		 */
13130 		mdi_hold_path(PIP(cip));
13131 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13132 
13133 		rval = mdi_pi_offline(PIP(cip), flags);
13134 
13135 		mdi_devi_enter_phci(pptr->port_dip, circ);
13136 		mdi_rele_path(PIP(cip));
13137 
13138 		if (rval == MDI_SUCCESS) {
13139 			/*
13140 			 * Clear MPxIO path permanent disable as the path is
13141 			 * already offlined.
13142 			 */
13143 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13144 
13145 			if (flags & NDI_DEVI_REMOVE) {
13146 				(void) mdi_pi_free(PIP(cip), 0);
13147 			}
13148 		} else {
13149 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13150 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13151 			    "fcp_offline_child: mdi_pi_offline failed "
13152 			    "rval=%x cip=%p", rval, cip);
13153 		}
13154 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13155 	}
13156 
13157 	mutex_enter(&ptgt->tgt_mutex);
13158 	plun->lun_state &= ~FCP_LUN_INIT;
13159 	mutex_exit(&ptgt->tgt_mutex);
13160 
13161 	mutex_enter(&pptr->port_mutex);
13162 	mutex_enter(&plun->lun_mutex);
13163 
13164 	if (rval == NDI_SUCCESS) {
13165 		cdip = NULL;
13166 		if (flags & NDI_DEVI_REMOVE) {
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal cip; after offlining the LUN with the old
			 * guid we should keep lun_cip since it is the cip
			 * of the LUN with the new guid.  Otherwise remove
			 * our reference to the child node.
			 */
13174 			if (plun->lun_cip == cip) {
13175 				plun->lun_cip = NULL;
13176 			}
13177 			if (plun->lun_old_guid) {
13178 				kmem_free(plun->lun_old_guid,
13179 				    plun->lun_old_guid_size);
13180 				plun->lun_old_guid = NULL;
13181 				plun->lun_old_guid_size = 0;
13182 			}
13183 		}
13184 	}
13185 
13186 	if (cdip) {
13187 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13188 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13189 		    " target=%x lun=%x", "ndi_offline",
13190 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13191 	}
13192 
13193 	return (rval);
13194 }
13195 
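/*
 * remove the child node (devinfo or mdi_pathinfo) of the given LUN if
 * it is still present, and clear the LUN's reference to it
 *
 * called with the port, target and LUN mutexes held; for the MPxIO case
 * they are dropped and re-acquired around the mdi calls
 */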
13196 static void
13197 fcp_remove_child(struct fcp_lun *plun)
13198 {
13199 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13200 
13201 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13202 		if (plun->lun_mpxio == 0) {
13203 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13204 			(void) ndi_devi_free(DIP(plun->lun_cip));
13205 		} else {
13206 			mutex_exit(&plun->lun_mutex);
13207 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13208 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13209 			FCP_TRACE(fcp_logq,
13210 			    plun->lun_tgt->tgt_port->port_instbuf,
13211 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13212 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13213 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13214 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13215 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13216 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13217 			mutex_enter(&plun->lun_mutex);
13218 		}
13219 	}
13220 
13221 	plun->lun_cip = NULL;
13222 }
13223 
13224 /*
13225  * called when a timeout occurs
13226  *
13227  * can be scheduled during an attach or resume (if not already running)
13228  *
13229  * one timeout is set up for all ports
13230  *
13231  * acquires and releases the global mutex
13232  */
13233 /*ARGSUSED*/
13234 static void
13235 fcp_watch(void *arg)
13236 {
13237 	struct fcp_port	*pptr;
13238 	struct fcp_ipkt	*icmd;
13239 	struct fcp_ipkt	*nicmd;
13240 	struct fcp_pkt	*cmd;
13241 	struct fcp_pkt	*ncmd;
13242 	struct fcp_pkt	*tail;
13243 	struct fcp_pkt	*pcmd;
13244 	struct fcp_pkt	*save_head;
13245 	struct fcp_port	*save_port;
13246 
13247 	/* increment global watchdog time */
13248 	fcp_watchdog_time += fcp_watchdog_timeout;
13249 
13250 	mutex_enter(&fcp_global_mutex);
13251 
13252 	/* scan each port in our list */
13253 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13254 		save_port = fcp_port_head;
13255 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13256 		mutex_exit(&fcp_global_mutex);
13257 
13258 		mutex_enter(&pptr->port_mutex);
13259 		if (pptr->port_ipkt_list == NULL &&
13260 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13261 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13262 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13263 			mutex_exit(&pptr->port_mutex);
13264 			mutex_enter(&fcp_global_mutex);
13265 			goto end_of_watchdog;
13266 		}
13267 
13268 		/*
		 * We check if a list of targets needs to be offlined.
13270 		 */
13271 		if (pptr->port_offline_tgts) {
13272 			fcp_scan_offline_tgts(pptr);
13273 		}
13274 
13275 		/*
		 * We check if a list of luns needs to be offlined.
13277 		 */
13278 		if (pptr->port_offline_luns) {
13279 			fcp_scan_offline_luns(pptr);
13280 		}
13281 
13282 		/*
		 * We check if a list of targets or luns needs to be reset.
13284 		 */
13285 		if (pptr->port_reset_list) {
13286 			fcp_check_reset_delay(pptr);
13287 		}
13288 
13289 		mutex_exit(&pptr->port_mutex);
13290 
13291 		/*
13292 		 * This is where the pending commands (pkt) are checked for
13293 		 * timeout.
13294 		 */
13295 		mutex_enter(&pptr->port_pkt_mutex);
13296 		tail = pptr->port_pkt_tail;
13297 
13298 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13299 		    cmd != NULL; cmd = ncmd) {
13300 			ncmd = cmd->cmd_next;
13301 			/*
13302 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13303 			 * must be set.
13304 			 */
13305 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
			/*
			 * FCP_INVALID_TIMEOUT is set for those commands
			 * that need to be failed, mostly cmds that could
			 * not be queued down within the "timeout" value.
			 * cmd->cmd_timeout is used to try and requeue the
			 * command regularly.
			 */
13313 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13314 				/*
13315 				 * This command hasn't timed out yet.  Let's
13316 				 * go to the next one.
13317 				 */
13318 				pcmd = cmd;
13319 				goto end_of_loop;
13320 			}
13321 
13322 			if (cmd == pptr->port_pkt_head) {
13323 				ASSERT(pcmd == NULL);
13324 				pptr->port_pkt_head = cmd->cmd_next;
13325 			} else {
13326 				ASSERT(pcmd != NULL);
13327 				pcmd->cmd_next = cmd->cmd_next;
13328 			}
13329 
13330 			if (cmd == pptr->port_pkt_tail) {
13331 				ASSERT(cmd->cmd_next == NULL);
13332 				pptr->port_pkt_tail = pcmd;
13333 				if (pcmd) {
13334 					pcmd->cmd_next = NULL;
13335 				}
13336 			}
13337 			cmd->cmd_next = NULL;
13338 
			/*
			 * Save the current head before dropping the
			 * mutex.  If the head doesn't remain the same
			 * after re-acquiring the mutex, just bail out
			 * and revisit on the next tick.
			 *
			 * Note that the tail pointer can change as
			 * commands get requeued after a failure to
			 * retransport.
			 */
13348 			save_head = pptr->port_pkt_head;
13349 			mutex_exit(&pptr->port_pkt_mutex);
13350 
13351 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13352 			    FCP_INVALID_TIMEOUT) {
13353 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13354 				struct fcp_lun	*plun;
13355 				struct fcp_tgt	*ptgt;
13356 
13357 				plun = ADDR2LUN(&pkt->pkt_address);
13358 				ptgt = plun->lun_tgt;
13359 
13360 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13361 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13362 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13363 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13364 
				if (cmd->cmd_state == FCP_PKT_ABORTING) {
					fcp_fail_cmd(cmd, CMD_RESET,
					    STAT_DEV_RESET);
				} else {
					fcp_fail_cmd(cmd, CMD_TIMEOUT,
					    STAT_ABORTED);
				}
13369 			} else {
13370 				fcp_retransport_cmd(pptr, cmd);
13371 			}
13372 			mutex_enter(&pptr->port_pkt_mutex);
13373 			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like the linked list changed (this
				 * mostly happens when the OFFLINE LUN code
				 * starts returning overflow queue commands
				 * in parallel).  Bail out and revisit on
				 * the next tick.
				 */
13381 				break;
13382 			}
13383 		end_of_loop:
13384 			/*
			 * Scan only up to the previously known tail pointer
			 * to avoid excessive processing; lots of new packets
13387 			 * could have been added to the tail or the old ones
13388 			 * re-queued.
13389 			 */
13390 			if (cmd == tail) {
13391 				break;
13392 			}
13393 		}
13394 		mutex_exit(&pptr->port_pkt_mutex);
13395 
13396 		mutex_enter(&pptr->port_mutex);
13397 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13398 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13399 
13400 			nicmd = icmd->ipkt_next;
13401 			if ((icmd->ipkt_restart != 0) &&
13402 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13403 				/* packet has not timed out */
13404 				continue;
13405 			}
13406 
13407 			/* time for packet re-transport */
13408 			if (icmd == pptr->port_ipkt_list) {
13409 				pptr->port_ipkt_list = icmd->ipkt_next;
13410 				if (pptr->port_ipkt_list) {
13411 					pptr->port_ipkt_list->ipkt_prev =
13412 					    NULL;
13413 				}
13414 			} else {
13415 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13416 				if (icmd->ipkt_next) {
13417 					icmd->ipkt_next->ipkt_prev =
13418 					    icmd->ipkt_prev;
13419 				}
13420 			}
13421 			icmd->ipkt_next = NULL;
13422 			icmd->ipkt_prev = NULL;
13423 			mutex_exit(&pptr->port_mutex);
13424 
13425 			if (fcp_is_retryable(icmd)) {
13426 				fc_ulp_rscn_info_t *rscnp =
13427 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13428 				    pkt_ulp_rscn_infop;
13429 
13430 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13431 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13432 				    "%x to D_ID=%x Retrying..",
13433 				    icmd->ipkt_opcode,
13434 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13435 
13436 				/*
13437 				 * Update the RSCN count in the packet
13438 				 * before resending.
13439 				 */
13440 
13441 				if (rscnp != NULL) {
13442 					rscnp->ulp_rscn_count =
13443 					    fc_ulp_get_rscn_count(pptr->
13444 					    port_fp_handle);
13445 				}
13446 
13447 				mutex_enter(&pptr->port_mutex);
13448 				mutex_enter(&ptgt->tgt_mutex);
13449 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13450 					mutex_exit(&ptgt->tgt_mutex);
13451 					mutex_exit(&pptr->port_mutex);
13452 					switch (icmd->ipkt_opcode) {
13453 						int rval;
13454 					case LA_ELS_PLOGI:
13455 						if ((rval = fc_ulp_login(
13456 						    pptr->port_fp_handle,
13457 						    &icmd->ipkt_fpkt, 1)) ==
13458 						    FC_SUCCESS) {
13459 							mutex_enter(
13460 							    &pptr->port_mutex);
13461 							continue;
13462 						}
13463 						if (fcp_handle_ipkt_errors(
13464 						    pptr, ptgt, icmd, rval,
13465 						    "PLOGI") == DDI_SUCCESS) {
13466 							mutex_enter(
13467 							    &pptr->port_mutex);
13468 							continue;
13469 						}
13470 						break;
13471 
13472 					case LA_ELS_PRLI:
13473 						if ((rval = fc_ulp_issue_els(
13474 						    pptr->port_fp_handle,
13475 						    icmd->ipkt_fpkt)) ==
13476 						    FC_SUCCESS) {
13477 							mutex_enter(
13478 							    &pptr->port_mutex);
13479 							continue;
13480 						}
13481 						if (fcp_handle_ipkt_errors(
13482 						    pptr, ptgt, icmd, rval,
13483 						    "PRLI") == DDI_SUCCESS) {
13484 							mutex_enter(
13485 							    &pptr->port_mutex);
13486 							continue;
13487 						}
13488 						break;
13489 
13490 					default:
13491 						if ((rval = fcp_transport(
13492 						    pptr->port_fp_handle,
13493 						    icmd->ipkt_fpkt, 1)) ==
13494 						    FC_SUCCESS) {
13495 							mutex_enter(
13496 							    &pptr->port_mutex);
13497 							continue;
13498 						}
13499 						if (fcp_handle_ipkt_errors(
13500 						    pptr, ptgt, icmd, rval,
13501 						    "PRLI") == DDI_SUCCESS) {
13502 							mutex_enter(
13503 							    &pptr->port_mutex);
13504 							continue;
13505 						}
13506 						break;
13507 					}
13508 				} else {
13509 					mutex_exit(&ptgt->tgt_mutex);
13510 					mutex_exit(&pptr->port_mutex);
13511 				}
13512 			} else {
13513 				fcp_print_error(icmd->ipkt_fpkt);
13514 			}
13515 
13516 			(void) fcp_call_finish_init(pptr, ptgt,
13517 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13518 			    icmd->ipkt_cause);
13519 			fcp_icmd_free(pptr, icmd);
13520 			mutex_enter(&pptr->port_mutex);
13521 		}
13522 
13523 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13524 		mutex_exit(&pptr->port_mutex);
13525 		mutex_enter(&fcp_global_mutex);
13526 
13527 	end_of_watchdog:
13528 		/*
13529 		 * Bail out early before getting into trouble
13530 		 */
13531 		if (save_port != fcp_port_head) {
13532 			break;
13533 		}
13534 	}
13535 
13536 	if (fcp_watchdog_init > 0) {
13537 		/* reschedule timeout to go again */
13538 		fcp_watchdog_id =
13539 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13540 	}
13541 	mutex_exit(&fcp_global_mutex);
13542 }
13543 
13544 
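/*
 * walk the port's list of pending reset delay elements; for each element
 * taken off the list whose target generation count still matches, update
 * the target or LUN state and abort whatever is still outstanding for it
 *
 * called with the port mutex held; the mutex is dropped while the
 * outstanding commands are aborted
 */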
13545 static void
13546 fcp_check_reset_delay(struct fcp_port *pptr)
13547 {
13548 	uint32_t		tgt_cnt;
13549 	int			level;
13550 	struct fcp_tgt	*ptgt;
13551 	struct fcp_lun	*plun;
13552 	struct fcp_reset_elem *cur = NULL;
13553 	struct fcp_reset_elem *next = NULL;
13554 	struct fcp_reset_elem *prev = NULL;
13555 
13556 	ASSERT(mutex_owned(&pptr->port_mutex));
13557 
13558 	next = pptr->port_reset_list;
13559 	while ((cur = next) != NULL) {
13560 		next = cur->next;
13561 
13562 		if (cur->timeout < fcp_watchdog_time) {
13563 			prev = cur;
13564 			continue;
13565 		}
13566 
13567 		ptgt = cur->tgt;
13568 		plun = cur->lun;
13569 		tgt_cnt = cur->tgt_cnt;
13570 
13571 		if (ptgt) {
13572 			level = RESET_TARGET;
13573 		} else {
13574 			ASSERT(plun != NULL);
13575 			level = RESET_LUN;
13576 			ptgt = plun->lun_tgt;
13577 		}
13578 		if (prev) {
13579 			prev->next = next;
13580 		} else {
			/*
			 * Because we drop the port mutex while aborting
			 * packets, we can't rely on reset_list pointing
			 * to our head.
			 */
13586 			if (cur == pptr->port_reset_list) {
13587 				pptr->port_reset_list = next;
13588 			} else {
13589 				struct fcp_reset_elem *which;
13590 
13591 				which = pptr->port_reset_list;
13592 				while (which && which->next != cur) {
13593 					which = which->next;
13594 				}
13595 				ASSERT(which != NULL);
13596 
13597 				which->next = next;
13598 				prev = which;
13599 			}
13600 		}
13601 
13602 		kmem_free(cur, sizeof (*cur));
13603 
13604 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13605 			mutex_enter(&ptgt->tgt_mutex);
13606 			if (level == RESET_TARGET) {
13607 				fcp_update_tgt_state(ptgt,
13608 				    FCP_RESET, FCP_LUN_BUSY);
13609 			} else {
13610 				fcp_update_lun_state(plun,
13611 				    FCP_RESET, FCP_LUN_BUSY);
13612 			}
13613 			mutex_exit(&ptgt->tgt_mutex);
13614 
13615 			mutex_exit(&pptr->port_mutex);
13616 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13617 			mutex_enter(&pptr->port_mutex);
13618 		}
13619 	}
13620 }
13621 
13622 
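/*
 * pull the commands destined for the given target (or LUN) off the
 * port's packet queue and complete them with CMD_RESET/STAT_DEV_RESET,
 * then try to abort the packets still issued to the transport; packets
 * whose abort fails are parked on the overflow queue so that fcp_watch
 * can time them out later
 *
 * acquires and releases the port, packet, target and LUN mutexes
 */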
13623 static void
13624 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13625     struct fcp_lun *rlun, int tgt_cnt)
13626 {
13627 	int			rval;
13628 	struct fcp_lun	*tlun, *nlun;
13629 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13630 	    *cmd = NULL, *head = NULL,
13631 	    *tail = NULL;
13632 
13633 	mutex_enter(&pptr->port_pkt_mutex);
13634 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13635 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13636 		struct fcp_tgt *ptgt = plun->lun_tgt;
13637 
13638 		ncmd = cmd->cmd_next;
13639 
13640 		if (ptgt != ttgt && plun != rlun) {
13641 			pcmd = cmd;
13642 			continue;
13643 		}
13644 
13645 		if (pcmd != NULL) {
13646 			ASSERT(pptr->port_pkt_head != cmd);
13647 			pcmd->cmd_next = ncmd;
13648 		} else {
13649 			ASSERT(cmd == pptr->port_pkt_head);
13650 			pptr->port_pkt_head = ncmd;
13651 		}
13652 		if (pptr->port_pkt_tail == cmd) {
13653 			ASSERT(cmd->cmd_next == NULL);
13654 			pptr->port_pkt_tail = pcmd;
13655 			if (pcmd != NULL) {
13656 				pcmd->cmd_next = NULL;
13657 			}
13658 		}
13659 
13660 		if (head == NULL) {
13661 			head = tail = cmd;
13662 		} else {
13663 			ASSERT(tail != NULL);
13664 			tail->cmd_next = cmd;
13665 			tail = cmd;
13666 		}
13667 		cmd->cmd_next = NULL;
13668 	}
13669 	mutex_exit(&pptr->port_pkt_mutex);
13670 
13671 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13672 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13673 
13674 		ncmd = cmd->cmd_next;
13675 		ASSERT(pkt != NULL);
13676 
13677 		mutex_enter(&pptr->port_mutex);
13678 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13679 			mutex_exit(&pptr->port_mutex);
13680 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13681 			pkt->pkt_reason = CMD_RESET;
13682 			pkt->pkt_statistics |= STAT_DEV_RESET;
13683 			cmd->cmd_state = FCP_PKT_IDLE;
13684 			fcp_post_callback(cmd);
13685 		} else {
13686 			mutex_exit(&pptr->port_mutex);
13687 		}
13688 	}
13689 
13690 	/*
13691 	 * If the FCA will return all the commands in its queue then our
13692 	 * work is easy, just return.
13693 	 */
13694 
13695 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13696 		return;
13697 	}
13698 
13699 	/*
13700 	 * For RESET_LUN get hold of target pointer
13701 	 */
13702 	if (ttgt == NULL) {
13703 		ASSERT(rlun != NULL);
13704 
13705 		ttgt = rlun->lun_tgt;
13706 
13707 		ASSERT(ttgt != NULL);
13708 	}
13709 
	/*
	 * There are some severe race conditions here.  While we are trying
	 * to abort the pkt, it might be completing; so mark it as aborting
	 * and, if the abort does not succeed, handle it in the watch thread.
	 */
13716 	mutex_enter(&ttgt->tgt_mutex);
13717 	nlun = ttgt->tgt_lun;
13718 	mutex_exit(&ttgt->tgt_mutex);
13719 	while ((tlun = nlun) != NULL) {
13720 		int restart = 0;
13721 		if (rlun && rlun != tlun) {
13722 			mutex_enter(&ttgt->tgt_mutex);
13723 			nlun = tlun->lun_next;
13724 			mutex_exit(&ttgt->tgt_mutex);
13725 			continue;
13726 		}
13727 		mutex_enter(&tlun->lun_mutex);
13728 		cmd = tlun->lun_pkt_head;
13729 		while (cmd != NULL) {
13730 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13731 				struct scsi_pkt *pkt;
13732 
13733 				restart = 1;
13734 				cmd->cmd_state = FCP_PKT_ABORTING;
13735 				mutex_exit(&tlun->lun_mutex);
13736 				rval = fc_ulp_abort(pptr->port_fp_handle,
13737 				    cmd->cmd_fp_pkt, KM_SLEEP);
13738 				if (rval == FC_SUCCESS) {
13739 					pkt = cmd->cmd_pkt;
13740 					pkt->pkt_reason = CMD_RESET;
13741 					pkt->pkt_statistics |= STAT_DEV_RESET;
13742 					cmd->cmd_state = FCP_PKT_IDLE;
13743 					fcp_post_callback(cmd);
13744 				} else {
13745 					caddr_t msg;
13746 
13747 					(void) fc_ulp_error(rval, &msg);
13748 
					/*
					 * This part is tricky.  The abort
					 * failed and now the command could
					 * be completing.  The cmd_state ==
					 * FCP_PKT_ABORTING check should
					 * save us in fcp_cmd_callback: if
					 * we are already aborting, the
					 * command is ignored there.  Here
					 * we leave this packet around for
					 * 20 sec to be aborted by the
					 * fcp_watch thread.
					 */
13761 					fcp_log(CE_WARN, pptr->port_dip,
13762 					    "!Abort failed after reset %s",
13763 					    msg);
13764 
13765 					cmd->cmd_timeout =
13766 					    fcp_watchdog_time +
13767 					    cmd->cmd_pkt->pkt_time +
13768 					    FCP_FAILED_DELAY;
13769 
13770 					cmd->cmd_fp_pkt->pkt_timeout =
13771 					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack: the cmd is put on
					 * the overflow queue so that it can
					 * eventually be timed out.
					 */
13777 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
13778 
13779 					mutex_enter(&pptr->port_pkt_mutex);
13780 					if (pptr->port_pkt_head) {
13781 						ASSERT(pptr->port_pkt_tail
13782 						    != NULL);
13783 						pptr->port_pkt_tail->cmd_next
13784 						    = cmd;
13785 						pptr->port_pkt_tail = cmd;
13786 					} else {
13787 						ASSERT(pptr->port_pkt_tail
13788 						    == NULL);
13789 						pptr->port_pkt_head =
13790 						    pptr->port_pkt_tail
13791 						    = cmd;
13792 					}
13793 					cmd->cmd_next = NULL;
13794 					mutex_exit(&pptr->port_pkt_mutex);
13795 				}
13796 				mutex_enter(&tlun->lun_mutex);
13797 				cmd = tlun->lun_pkt_head;
13798 			} else {
13799 				cmd = cmd->cmd_forw;
13800 			}
13801 		}
13802 		mutex_exit(&tlun->lun_mutex);
13803 
13804 		mutex_enter(&ttgt->tgt_mutex);
		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
13806 		mutex_exit(&ttgt->tgt_mutex);
13807 
13808 		mutex_enter(&pptr->port_mutex);
13809 		if (tgt_cnt != ttgt->tgt_change_cnt) {
13810 			mutex_exit(&pptr->port_mutex);
13811 			return;
13812 		} else {
13813 			mutex_exit(&pptr->port_mutex);
13814 		}
13815 	}
13816 }
13817 
13818 
13819 /*
13820  * unlink the soft state, returning the soft state found (if any)
13821  *
13822  * acquires and releases the global mutex
13823  */
13824 struct fcp_port *
13825 fcp_soft_state_unlink(struct fcp_port *pptr)
13826 {
13827 	struct fcp_port	*hptr;		/* ptr index */
13828 	struct fcp_port	*tptr;		/* prev hptr */
13829 
13830 	mutex_enter(&fcp_global_mutex);
13831 	for (hptr = fcp_port_head, tptr = NULL;
13832 	    hptr != NULL;
13833 	    tptr = hptr, hptr = hptr->port_next) {
13834 		if (hptr == pptr) {
13835 			/* we found a match -- remove this item */
13836 			if (tptr == NULL) {
13837 				/* we're at the head of the list */
13838 				fcp_port_head = hptr->port_next;
13839 			} else {
13840 				tptr->port_next = hptr->port_next;
13841 			}
13842 			break;			/* success */
13843 		}
13844 	}
13845 	if (fcp_port_head == NULL) {
13846 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
13847 	}
13848 	mutex_exit(&fcp_global_mutex);
13849 	return (hptr);
13850 }
13851 
13852 
13853 /*
13854  * called by fcp_scsi_hba_tgt_init to find a LUN given a
13855  * WWN and a LUN number
13856  */
13857 /* ARGSUSED */
13858 static struct fcp_lun *
13859 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
13860 {
13861 	int hash;
13862 	struct fcp_tgt *ptgt;
13863 	struct fcp_lun *plun;
13864 
13865 	ASSERT(mutex_owned(&pptr->port_mutex));
13866 
13867 	hash = FCP_HASH(wwn);
13868 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
13869 	    ptgt = ptgt->tgt_next) {
13870 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
13871 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
13872 			mutex_enter(&ptgt->tgt_mutex);
13873 			for (plun = ptgt->tgt_lun;
13874 			    plun != NULL;
13875 			    plun = plun->lun_next) {
13876 				if (plun->lun_num == lun) {
13877 					mutex_exit(&ptgt->tgt_mutex);
13878 					return (plun);
13879 				}
13880 			}
13881 			mutex_exit(&ptgt->tgt_mutex);
13882 			return (NULL);
13883 		}
13884 	}
13885 	return (NULL);
13886 }
13887 
13888 /*
13889  *     Function: fcp_prepare_pkt
13890  *
13891  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
13892  *		 for fcp_start(). It binds the data or partially maps it.
13893  *		 Builds the FCP header and starts the initialization of the
13894  *		 Fibre Channel header.
13895  *
13896  *     Argument: *pptr		FCP port.
13897  *		 *cmd		FCP packet.
13898  *		 *plun		LUN the command will be sent to.
13899  *
13900  *	Context: User, Kernel and Interrupt context.
13901  */
13902 static void
13903 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
13904     struct fcp_lun *plun)
13905 {
13906 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
13907 	struct fcp_tgt		*ptgt = plun->lun_tgt;
13908 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
13909 
13910 	ASSERT(cmd->cmd_pkt->pkt_comp ||
13911 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
13912 
13913 	if (cmd->cmd_pkt->pkt_numcookies) {
13914 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
13915 			fcmd->fcp_cntl.cntl_read_data = 1;
13916 			fcmd->fcp_cntl.cntl_write_data = 0;
13917 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
13918 		} else {
13919 			fcmd->fcp_cntl.cntl_read_data = 0;
13920 			fcmd->fcp_cntl.cntl_write_data = 1;
13921 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
13922 		}
13923 
13924 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
13925 
13926 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
13927 		ASSERT(fpkt->pkt_data_cookie_cnt <=
13928 		    pptr->port_data_dma_attr.dma_attr_sgllen);
13929 
13930 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
13931 
13932 		/* FCA needs pkt_datalen to be set */
13933 		fpkt->pkt_datalen = cmd->cmd_dmacount;
13934 		fcmd->fcp_data_len = cmd->cmd_dmacount;
13935 	} else {
13936 		fcmd->fcp_cntl.cntl_read_data = 0;
13937 		fcmd->fcp_cntl.cntl_write_data = 0;
13938 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
13939 		fpkt->pkt_datalen = 0;
13940 		fcmd->fcp_data_len = 0;
13941 	}
13942 
13943 	/* set up the Tagged Queuing type */
13944 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
13945 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
13946 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
13947 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
13948 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
13949 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
13950 	} else {
13951 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
13952 	}
13953 
13954 	fcmd->fcp_ent_addr = plun->lun_addr;
13955 
13956 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
13957 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
13958 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
13959 	} else {
13960 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
13961 	}
13962 
13963 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
13964 	cmd->cmd_pkt->pkt_state = 0;
13965 	cmd->cmd_pkt->pkt_statistics = 0;
13966 	cmd->cmd_pkt->pkt_resid = 0;
13967 
13968 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
13969 
13970 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
13971 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
13972 		fpkt->pkt_comp = NULL;
13973 	} else {
13974 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
13975 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
13976 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
13977 		}
13978 		fpkt->pkt_comp = fcp_cmd_callback;
13979 	}
13980 
13981 	mutex_enter(&pptr->port_mutex);
13982 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
13983 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
13984 	}
13985 	mutex_exit(&pptr->port_mutex);
13986 
13987 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
13988 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
13989 
13990 	/*
13991 	 * Save a few kernel cycles here
13992 	 */
13993 #ifndef	__lock_lint
13994 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
13995 #endif /* __lock_lint */
13996 }
13997 
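/*
 * complete a packet back to the target driver by invoking its
 * completion routine through scsi_hba_pkt_comp()
 */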
13998 static void
13999 fcp_post_callback(struct fcp_pkt *cmd)
14000 {
14001 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14002 }
14003 
14004 
14005 /*
14006  * called to do polled I/O by fcp_start()
14007  *
 * return a transport status value, i.e. TRAN_ACCEPT for success
14009  */
14010 static int
14011 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14012 {
14013 	int	rval;
14014 
14015 #ifdef	DEBUG
14016 	mutex_enter(&pptr->port_pkt_mutex);
14017 	pptr->port_npkts++;
14018 	mutex_exit(&pptr->port_pkt_mutex);
14019 #endif /* DEBUG */
14020 
14021 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14022 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14023 	} else {
14024 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14025 	}
14026 
14027 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14028 
14029 	cmd->cmd_state = FCP_PKT_ISSUED;
14030 
14031 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14032 
14033 #ifdef	DEBUG
14034 	mutex_enter(&pptr->port_pkt_mutex);
14035 	pptr->port_npkts--;
14036 	mutex_exit(&pptr->port_pkt_mutex);
14037 #endif /* DEBUG */
14038 
14039 	cmd->cmd_state = FCP_PKT_IDLE;
14040 
14041 	switch (rval) {
14042 	case FC_SUCCESS:
14043 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14044 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14045 			rval = TRAN_ACCEPT;
14046 		} else {
14047 			rval = TRAN_FATAL_ERROR;
14048 		}
14049 		break;
14050 
14051 	case FC_TRAN_BUSY:
14052 		rval = TRAN_BUSY;
14053 		cmd->cmd_pkt->pkt_resid = 0;
14054 		break;
14055 
14056 	case FC_BADPACKET:
14057 		rval = TRAN_BADPKT;
14058 		break;
14059 
14060 	default:
14061 		rval = TRAN_FATAL_ERROR;
14062 		break;
14063 	}
14064 
14065 	return (rval);
14066 }
14067 
14068 
14069 /*
14070  * called by some of the following transport-called routines to convert
14071  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14072  */
14073 static struct fcp_port *
14074 fcp_dip2port(dev_info_t *dip)
14075 {
14076 	int	instance;
14077 
14078 	instance = ddi_get_instance(dip);
14079 	return (ddi_get_soft_state(fcp_softstate, instance));
14080 }
14081 
14082 
14083 /*
14084  * called internally to return a LUN given a dip
14085  */
14086 struct fcp_lun *
14087 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14088 {
14089 	struct fcp_tgt *ptgt;
14090 	struct fcp_lun *plun;
14091 	int i;
14092 
14093 
14094 	ASSERT(mutex_owned(&pptr->port_mutex));
14095 
14096 	for (i = 0; i < FCP_NUM_HASH; i++) {
14097 		for (ptgt = pptr->port_tgt_hash_table[i];
14098 		    ptgt != NULL;
14099 		    ptgt = ptgt->tgt_next) {
14100 			mutex_enter(&ptgt->tgt_mutex);
14101 			for (plun = ptgt->tgt_lun; plun != NULL;
14102 			    plun = plun->lun_next) {
14103 				mutex_enter(&plun->lun_mutex);
14104 				if (plun->lun_cip == cip) {
14105 					mutex_exit(&plun->lun_mutex);
14106 					mutex_exit(&ptgt->tgt_mutex);
14107 					return (plun); /* match found */
14108 				}
14109 				mutex_exit(&plun->lun_mutex);
14110 			}
14111 			mutex_exit(&ptgt->tgt_mutex);
14112 		}
14113 	}
14114 	return (NULL);				/* no LUN found */
14115 }
14116 
14117 /*
 * pass an element to the hotplug list, kick the hotplug thread
 * and wait for the element to be processed by the hotplug thread;
 * on return the element has been freed
 *
 * return zero on success and non-zero on failure
 *
 * acquires/releases the target mutex
 */
14127 static int
14128 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14129     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14130 {
14131 	struct fcp_hp_elem	*elem;
14132 	int			rval;
14133 
14134 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14135 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14136 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14137 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14138 		fcp_log(CE_CONT, pptr->port_dip,
14139 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14140 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14141 		return (NDI_FAILURE);
14142 	}
14143 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14144 	mutex_enter(&elem->mutex);
14145 	if (elem->wait) {
14146 		while (elem->wait) {
14147 			cv_wait(&elem->cv, &elem->mutex);
14148 		}
14149 	}
14150 	rval = (elem->result);
14151 	mutex_exit(&elem->mutex);
14152 	mutex_destroy(&elem->mutex);
14153 	cv_destroy(&elem->cv);
14154 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14155 	return (rval);
14156 }
14157 
14158 /*
14159  * pass an element to the hotplug list, and then
14160  * kick the hotplug thread
14161  *
 * return a pointer to the hotplug element on success, else NULL on error
14163  *
14164  * acquires/releases the hotplug mutex
14165  *
14166  * called with the target mutex owned
14167  *
14168  * memory acquired in NOSLEEP mode
 * NOTE: if wait is set to 1 then the caller is responsible for waiting
 *	 for the hp daemon to process the request and is responsible for
14171  *	 freeing the element
14172  */
14173 static struct fcp_hp_elem *
14174 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14175     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14176 {
14177 	struct fcp_hp_elem	*elem;
14178 	dev_info_t *pdip;
14179 
14180 	ASSERT(pptr != NULL);
14181 	ASSERT(plun != NULL);
14182 	ASSERT(plun->lun_tgt != NULL);
14183 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14184 
14185 	/* create space for a hotplug element */
14186 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14187 	    == NULL) {
14188 		fcp_log(CE_WARN, NULL,
14189 		    "!can't allocate memory for hotplug element");
14190 		return (NULL);
14191 	}
14192 
14193 	/* fill in hotplug element */
14194 	elem->port = pptr;
14195 	elem->lun = plun;
14196 	elem->cip = cip;
14197 	elem->old_lun_mpxio = plun->lun_mpxio;
14198 	elem->what = what;
14199 	elem->flags = flags;
14200 	elem->link_cnt = link_cnt;
14201 	elem->tgt_cnt = tgt_cnt;
14202 	elem->wait = wait;
14203 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14204 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14205 
14206 	/* schedule the hotplug task */
14207 	pdip = pptr->port_dip;
14208 	mutex_enter(&plun->lun_mutex);
14209 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14210 		plun->lun_event_count++;
14211 		elem->event_cnt = plun->lun_event_count;
14212 	}
14213 	mutex_exit(&plun->lun_mutex);
14214 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14215 	    (void *)elem, KM_NOSLEEP) == NULL) {
14216 		mutex_enter(&plun->lun_mutex);
14217 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14218 			plun->lun_event_count--;
14219 		}
14220 		mutex_exit(&plun->lun_mutex);
14221 		kmem_free(elem, sizeof (*elem));
		return (NULL);
14223 	}
14224 
14225 	return (elem);
14226 }
14227 
14228 
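/*
 * try to send a previously queued command to the transport again: if the
 * LUN is neither busy nor offline and the port is not onlining, the
 * packet is re-prepared and handed to fcp_transport(); otherwise, or if
 * the transport call fails, the command is put back on the port's
 * packet queue
 *
 * acquires and releases the port and target mutexes
 */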
14229 static void
14230 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14231 {
14232 	int			rval;
14233 	struct scsi_address	*ap;
14234 	struct fcp_lun	*plun;
14235 	struct fcp_tgt	*ptgt;
14236 	fc_packet_t	*fpkt;
14237 
14238 	ap = &cmd->cmd_pkt->pkt_address;
14239 	plun = ADDR2LUN(ap);
14240 	ptgt = plun->lun_tgt;
14241 
14242 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14243 
14244 	cmd->cmd_state = FCP_PKT_IDLE;
14245 
14246 	mutex_enter(&pptr->port_mutex);
14247 	mutex_enter(&ptgt->tgt_mutex);
14248 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14249 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14250 		fc_ulp_rscn_info_t *rscnp;
14251 
14252 		cmd->cmd_state = FCP_PKT_ISSUED;
14253 
14254 		/*
14255 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14256 		 * originally NULL, hence we try to set it to the pd pointed
14257 		 * to by the SCSI device we're trying to get to.
14258 		 */
14259 
14260 		fpkt = cmd->cmd_fp_pkt;
14261 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14262 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14263 			/*
14264 			 * We need to notify the transport that we now have a
14265 			 * reference to the remote port handle.
14266 			 */
14267 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14268 		}
14269 
14270 		mutex_exit(&ptgt->tgt_mutex);
14271 		mutex_exit(&pptr->port_mutex);
14272 
14273 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14274 
14275 		/* prepare the packet */
14276 
14277 		fcp_prepare_pkt(pptr, cmd, plun);
14278 
14279 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14280 		    pkt_ulp_rscn_infop;
14281 
14282 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14283 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14284 
14285 		if (rscnp != NULL) {
14286 			rscnp->ulp_rscn_count =
14287 			    fc_ulp_get_rscn_count(pptr->
14288 			    port_fp_handle);
14289 		}
14290 
14291 		rval = fcp_transport(pptr->port_fp_handle,
14292 		    cmd->cmd_fp_pkt, 0);
14293 
14294 		if (rval == FC_SUCCESS) {
14295 			return;
14296 		}
14297 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14298 	} else {
14299 		mutex_exit(&ptgt->tgt_mutex);
14300 		mutex_exit(&pptr->port_mutex);
14301 	}
14302 
14303 	fcp_queue_pkt(pptr, cmd);
14304 }
14305 
14306 
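/*
 *     Function: fcp_fail_cmd
 *
 *  Description: Fails a queued packet: the CFLAG_IN_QUEUE flag is cleared,
 *		 the packet reason, state and statistics are set from the
 *		 arguments and fcp_post_callback() is called to complete
 *		 the packet.
 *
 *     Argument: *cmd		FCP packet to fail.
 *		 reason		Value stored in pkt_reason.
 *		 statistics	Value stored in pkt_statistics.
 *
 * Return Value: None
 */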
14307 static void
14308 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14309 {
14310 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14311 
14312 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14313 	cmd->cmd_state = FCP_PKT_IDLE;
14314 
14315 	cmd->cmd_pkt->pkt_reason = reason;
14316 	cmd->cmd_pkt->pkt_state = 0;
14317 	cmd->cmd_pkt->pkt_statistics = statistics;
14318 
14319 	fcp_post_callback(cmd);
14320 }
14321 
14322 /*
14323  *     Function: fcp_queue_pkt
14324  *
14325  *  Description: This function queues the packet passed by the caller into
14326  *		 the list of packets of the FCP port.
14327  *
14328  *     Argument: *pptr		FCP port.
14329  *		 *cmd		FCP packet to queue.
14330  *
14331  * Return Value: None
14332  *
14333  *	Context: User, Kernel and Interrupt context.
14334  */
14335 static void
14336 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14337 {
	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14339 
14340 	mutex_enter(&pptr->port_pkt_mutex);
14341 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14342 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14343 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14344 
14345 	/*
14346 	 * zero pkt_time means hang around for ever
14347 	 */
14348 	if (cmd->cmd_pkt->pkt_time) {
14349 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14350 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14351 		} else {
14352 			/*
14353 			 * Indicate the watch thread to fail the
14354 			 * command by setting it to highest value
14355 			 */
14356 			cmd->cmd_timeout = fcp_watchdog_time;
14357 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14358 		}
14359 	}
14360 
14361 	if (pptr->port_pkt_head) {
14362 		ASSERT(pptr->port_pkt_tail != NULL);
14363 
14364 		pptr->port_pkt_tail->cmd_next = cmd;
14365 		pptr->port_pkt_tail = cmd;
14366 	} else {
14367 		ASSERT(pptr->port_pkt_tail == NULL);
14368 
14369 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14370 	}
14371 	cmd->cmd_next = NULL;
14372 	mutex_exit(&pptr->port_pkt_mutex);
14373 }
14374 
14375 /*
14376  *     Function: fcp_update_targets
14377  *
14378  *  Description: This function applies the specified change of state to all
14379  *		 the targets listed.  The operation applied is 'set'.
14380  *
14381  *     Argument: *pptr		FCP port.
14382  *		 *dev_list	Array of fc_portmap_t structures.
14383  *		 count		Length of dev_list.
14384  *		 state		State bits to update.
14385  *		 cause		Reason for the update.
14386  *
14387  * Return Value: None
14388  *
14389  *	Context: User, Kernel and Interrupt context.
14390  *		 The mutex pptr->port_mutex must be held.
14391  */
14392 static void
14393 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14394     uint32_t count, uint32_t state, int cause)
14395 {
14396 	fc_portmap_t		*map_entry;
14397 	struct fcp_tgt	*ptgt;
14398 
14399 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14400 
14401 	while (count--) {
14402 		map_entry = &(dev_list[count]);
14403 		ptgt = fcp_lookup_target(pptr,
14404 		    (uchar_t *)&(map_entry->map_pwwn));
14405 		if (ptgt == NULL) {
14406 			continue;
14407 		}
14408 
14409 		mutex_enter(&ptgt->tgt_mutex);
14410 		ptgt->tgt_trace = 0;
14411 		ptgt->tgt_change_cnt++;
14412 		ptgt->tgt_statec_cause = cause;
14413 		ptgt->tgt_tmp_cnt = 1;
14414 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14415 		mutex_exit(&ptgt->tgt_mutex);
14416 	}
14417 }
14418 
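/*
 *     Function: fcp_call_finish_init
 *
 *  Description: Wrapper around fcp_call_finish_init_held() that grabs and
 *		 releases pptr->port_mutex around the call.
 *
 * Return Value: FCP_NO_CHANGE or FCP_DEV_CHANGE (see
 *		 fcp_call_finish_init_held()).
 */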
14419 static int
14420 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14421     int lcount, int tcount, int cause)
14422 {
14423 	int rval;
14424 
14425 	mutex_enter(&pptr->port_mutex);
14426 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14427 	mutex_exit(&pptr->port_mutex);
14428 
14429 	return (rval);
14430 }
14431 
14432 
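/*
 *     Function: fcp_call_finish_init_held
 *
 *  Description: Performs the bookkeeping at the end of a discovery step for
 *		 one target (or for no target at all): the target temporary
 *		 counter is decremented and, when it drops to zero,
 *		 fcp_finish_tgt() is called; if the cause is a link change or
 *		 link down and the link counter still matches, the port
 *		 temporary counter is decremented and fcp_finish_init() is
 *		 called when it reaches zero.
 *
 * Return Value: FCP_NO_CHANGE	No target change was detected.
 *		 FCP_DEV_CHANGE	The target change counter no longer matches.
 *
 *	Context: The mutex pptr->port_mutex must be held.
 */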
14433 static int
14434 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14435     int lcount, int tcount, int cause)
14436 {
14437 	int	finish_init = 0;
14438 	int	finish_tgt = 0;
14439 	int	do_finish_init = 0;
14440 	int	rval = FCP_NO_CHANGE;
14441 
14442 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14443 	    cause == FCP_CAUSE_LINK_DOWN) {
14444 		do_finish_init = 1;
14445 	}
14446 
14447 	if (ptgt != NULL) {
14448 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14449 		    FCP_BUF_LEVEL_2, 0,
14450 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14451 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14452 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14453 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14454 		    ptgt->tgt_d_id, ptgt->tgt_done);
14455 
14456 		mutex_enter(&ptgt->tgt_mutex);
14457 
14458 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14459 			rval = FCP_DEV_CHANGE;
14460 			if (do_finish_init && ptgt->tgt_done == 0) {
14461 				ptgt->tgt_done++;
14462 				finish_init = 1;
14463 			}
14464 		} else {
14465 			if (--ptgt->tgt_tmp_cnt <= 0) {
14466 				ptgt->tgt_tmp_cnt = 0;
14467 				finish_tgt = 1;
14468 
14469 				if (do_finish_init) {
14470 					finish_init = 1;
14471 				}
14472 			}
14473 		}
14474 		mutex_exit(&ptgt->tgt_mutex);
14475 	} else {
14476 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14477 		    FCP_BUF_LEVEL_2, 0,
14478 		    "Call Finish Init for NO target");
14479 
14480 		if (do_finish_init) {
14481 			finish_init = 1;
14482 		}
14483 	}
14484 
14485 	if (finish_tgt) {
14486 		ASSERT(ptgt != NULL);
14487 
14488 		mutex_enter(&ptgt->tgt_mutex);
14489 #ifdef	DEBUG
14490 		bzero(ptgt->tgt_tmp_cnt_stack,
14491 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14492 
14493 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14494 		    FCP_STACK_DEPTH);
14495 #endif /* DEBUG */
14496 		mutex_exit(&ptgt->tgt_mutex);
14497 
14498 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14499 	}
14500 
14501 	if (finish_init && lcount == pptr->port_link_cnt) {
14502 		ASSERT(pptr->port_tmp_cnt > 0);
14503 		if (--pptr->port_tmp_cnt == 0) {
14504 			fcp_finish_init(pptr);
14505 		}
14506 	} else if (lcount != pptr->port_link_cnt) {
14507 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14508 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14509 		    "fcp_call_finish_init_held,1: state change occured"
14510 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14511 	}
14512 
14513 	return (rval);
14514 }
14515 
14516 
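/*
 *     Function: fcp_reconfigure_luns
 *
 *  Description: timeout(9F) callback that triggers a rediscovery of the LUNs
 *		 of one target by building a single-entry device list and
 *		 feeding it to fcp_statec_callback() as a device change.  If
 *		 the timeout was canceled (tgt_tid is NULL), nothing is done.
 *
 *     Argument: tgt_handle	Pointer to the fcp_tgt to reconfigure.
 *
 * Return Value: None
 */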
14517 static void
14518 fcp_reconfigure_luns(void * tgt_handle)
14519 {
14520 	uint32_t		dev_cnt;
14521 	fc_portmap_t		*devlist;
14522 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14523 	struct fcp_port		*pptr = ptgt->tgt_port;
14524 
14525 	/*
14526 	 * If the timer that fires this off got canceled too late, the
14527 	 * target could have been destroyed.
14528 	 */
14529 
14530 	if (ptgt->tgt_tid == NULL) {
14531 		return;
14532 	}
14533 
14534 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14535 	if (devlist == NULL) {
14536 		fcp_log(CE_WARN, pptr->port_dip,
14537 		    "!fcp%d: failed to allocate for portmap",
14538 		    pptr->port_instance);
14539 		return;
14540 	}
14541 
14542 	dev_cnt = 1;
14543 	devlist->map_pd = ptgt->tgt_pd_handle;
14544 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14545 	devlist->map_did.port_id = ptgt->tgt_d_id;
14546 
14547 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14548 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14549 
14550 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14551 	devlist->map_type = PORT_DEVICE_NEW;
14552 	devlist->map_flags = 0;
14553 
14554 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14555 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14556 
14557 	/*
14558 	 * Clear the tgt_tid after no more references to
14559 	 * the fcp_tgt
14560 	 */
14561 	mutex_enter(&ptgt->tgt_mutex);
14562 	ptgt->tgt_tid = NULL;
14563 	mutex_exit(&ptgt->tgt_mutex);
14564 
14565 	kmem_free(devlist, sizeof (*devlist));
14566 }
14567 
14568 
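/*
 *     Function: fcp_free_targets
 *
 *  Description: Walks the target hash table of the port and frees every
 *		 target (and, through fcp_free_target(), every LUN).
 *
 *     Argument: *pptr		FCP port.
 *
 * Return Value: None
 */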
14569 static void
14570 fcp_free_targets(struct fcp_port *pptr)
14571 {
14572 	int			i;
14573 	struct fcp_tgt	*ptgt;
14574 
14575 	mutex_enter(&pptr->port_mutex);
14576 	for (i = 0; i < FCP_NUM_HASH; i++) {
14577 		ptgt = pptr->port_tgt_hash_table[i];
14578 		while (ptgt != NULL) {
14579 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14580 
14581 			fcp_free_target(ptgt);
14582 			ptgt = next_tgt;
14583 		}
14584 	}
14585 	mutex_exit(&pptr->port_mutex);
14586 }
14587 
14588 
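/*
 *     Function: fcp_free_target
 *
 *  Description: Cancels the pending reconfiguration timeout of the target,
 *		 if any, deallocates all of its LUNs and then deallocates the
 *		 target itself.
 *
 *     Argument: *ptgt		Target to free.
 *
 * Return Value: None
 */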
14589 static void
14590 fcp_free_target(struct fcp_tgt *ptgt)
14591 {
14592 	struct fcp_lun	*plun;
14593 	timeout_id_t		tid;
14594 
14595 	mutex_enter(&ptgt->tgt_mutex);
14596 	tid = ptgt->tgt_tid;
14597 
14598 	/*
14599 	 * Cancel any pending timeouts for this target.
14600 	 */
14601 
14602 	if (tid != NULL) {
14603 		/*
14604 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14605 		 * If tgt_tid is NULL, the callback will simply return.
14606 		 */
14607 		ptgt->tgt_tid = NULL;
14608 		mutex_exit(&ptgt->tgt_mutex);
14609 		(void) untimeout(tid);
14610 		mutex_enter(&ptgt->tgt_mutex);
14611 	}
14612 
14613 	plun = ptgt->tgt_lun;
14614 	while (plun != NULL) {
14615 		struct fcp_lun *next_lun = plun->lun_next;
14616 
14617 		fcp_dealloc_lun(plun);
14618 		plun = next_lun;
14619 	}
14620 
14621 	mutex_exit(&ptgt->tgt_mutex);
14622 	fcp_dealloc_tgt(ptgt);
14623 }
14624 
14625 /*
14626  *     Function: fcp_is_retryable
14627  *
14628  *  Description: Indicates if the internal packet is retryable.
14629  *
14630  *     Argument: *icmd		FCP internal packet.
14631  *
14632  * Return Value: 0	Not retryable
14633  *		 1	Retryable
14634  *
14635  *	Context: User, Kernel and Interrupt context
14636  */
14637 static int
14638 fcp_is_retryable(struct fcp_ipkt *icmd)
14639 {
14640 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14641 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14642 		return (0);
14643 	}
14644 
14645 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14646 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14647 }
14648 
14649 /*
14650  *     Function: fcp_create_on_demand
14651  *
14652  *     Argument: *pptr		FCP port.
14653  *		 *pwwn		Port WWN.
14654  *
14655  * Return Value: 0	Success
14656  *		 EIO
14657  *		 ENOMEM
14658  *		 EBUSY
14659  *		 EINVAL
14660  *
14661  *	Context: User and Kernel context
14662  */
14663 static int
14664 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14665 {
14666 	int			wait_ms;
14667 	int			tcount;
14668 	int			lcount;
14669 	int			ret;
14670 	int			error;
14671 	int			rval = EIO;
14672 	int			ntries;
14673 	fc_portmap_t		*devlist;
14674 	opaque_t		pd;
14675 	struct fcp_lun		*plun;
14676 	struct fcp_tgt		*ptgt;
14677 	int			old_manual = 0;
14678 
14679 	/* Allocates the fc_portmap_t structure. */
14680 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14681 
14682 	/*
14683 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14684 	 * in the commented statement below:
14685 	 *
14686 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14687 	 *
14688 	 * Below, the deadline for the discovery process is set.
14689 	 */
14690 	mutex_enter(&pptr->port_mutex);
14691 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14692 	mutex_exit(&pptr->port_mutex);
14693 
14694 	/*
14695 	 * We try to find the remote port based on the WWN provided by the
14696 	 * caller.  We actually ask fp/fctl if it has it.
14697 	 */
14698 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14699 	    (la_wwn_t *)pwwn, &error, 1);
14700 
14701 	if (pd == NULL) {
14702 		kmem_free(devlist, sizeof (*devlist));
14703 		return (rval);
14704 	}
14705 
14706 	/*
14707 	 * The remote port was found.  We ask fp/fctl to update our
14708 	 * fc_portmap_t structure.
14709 	 */
14710 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14711 	    (la_wwn_t *)pwwn, devlist);
14712 	if (ret != FC_SUCCESS) {
14713 		kmem_free(devlist, sizeof (*devlist));
14714 		return (rval);
14715 	}
14716 
14717 	/*
14718 	 * The map flag field is set to indicates that the creation is being
14719 	 * done at the user request (Ioclt probably luxadm or cfgadm).
14720 	 */
14721 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14722 
14723 	mutex_enter(&pptr->port_mutex);
14724 
14725 	/*
14726 	 * We check to see if fcp already has a target that describes the
14727 	 * device being created.  If not it is created.
14728 	 */
14729 	ptgt = fcp_lookup_target(pptr, pwwn);
14730 	if (ptgt == NULL) {
14731 		lcount = pptr->port_link_cnt;
14732 		mutex_exit(&pptr->port_mutex);
14733 
14734 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14735 		if (ptgt == NULL) {
14736 			fcp_log(CE_WARN, pptr->port_dip,
14737 			    "!FC target allocation failed");
14738 			return (ENOMEM);
14739 		}
14740 
14741 		mutex_enter(&pptr->port_mutex);
14742 	}
14743 
14744 	mutex_enter(&ptgt->tgt_mutex);
14745 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14746 	ptgt->tgt_tmp_cnt = 1;
14747 	ptgt->tgt_device_created = 0;
14748 	/*
14749 	 * If fabric and auto config is set but the target was
14750 	 * manually unconfigured then reset to the manual_config_only to
14751 	 * 0 so the device will get configured.
14752 	 */
14753 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14754 	    fcp_enable_auto_configuration &&
14755 	    ptgt->tgt_manual_config_only == 1) {
14756 		old_manual = 1;
14757 		ptgt->tgt_manual_config_only = 0;
14758 	}
14759 	mutex_exit(&ptgt->tgt_mutex);
14760 
14761 	fcp_update_targets(pptr, devlist, 1,
14762 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14763 
14764 	lcount = pptr->port_link_cnt;
14765 	tcount = ptgt->tgt_change_cnt;
14766 
14767 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14768 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14769 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14770 		    fcp_enable_auto_configuration && old_manual) {
14771 			mutex_enter(&ptgt->tgt_mutex);
14772 			ptgt->tgt_manual_config_only = 1;
14773 			mutex_exit(&ptgt->tgt_mutex);
14774 		}
14775 
14776 		if (pptr->port_link_cnt != lcount ||
14777 		    ptgt->tgt_change_cnt != tcount) {
14778 			rval = EBUSY;
14779 		}
14780 		mutex_exit(&pptr->port_mutex);
14781 
14782 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14783 		    FCP_BUF_LEVEL_3, 0,
14784 		    "fcp_create_on_demand: mapflags ptgt=%x, "
14785 		    "lcount=%x::port_link_cnt=%x, "
14786 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
14787 		    ptgt, lcount, pptr->port_link_cnt,
14788 		    tcount, ptgt->tgt_change_cnt, rval);
14789 		return (rval);
14790 	}
14791 
14792 	/*
14793 	 * Due to lack of synchronization mechanisms, we perform
14794 	 * periodic monitoring of our request; Because requests
14795 	 * get dropped when another one supercedes (either because
14796 	 * of a link change or a target change), it is difficult to
14797 	 * provide a clean synchronization mechanism (such as a
14798 	 * semaphore or a conditional variable) without exhaustively
14799 	 * rewriting the mainline discovery code of this driver.
14800 	 */
14801 	wait_ms = 500;
14802 
14803 	ntries = fcp_max_target_retries;
14804 
14805 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14806 	    FCP_BUF_LEVEL_3, 0,
14807 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
14808 	    "lcount=%x::port_link_cnt=%x, "
14809 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14810 	    "tgt_tmp_cnt =%x",
14811 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14812 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14813 	    ptgt->tgt_tmp_cnt);
14814 
14815 	mutex_enter(&ptgt->tgt_mutex);
14816 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
14817 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
14818 		mutex_exit(&ptgt->tgt_mutex);
14819 		mutex_exit(&pptr->port_mutex);
14820 
14821 		delay(drv_usectohz(wait_ms * 1000));
14822 
14823 		mutex_enter(&pptr->port_mutex);
14824 		mutex_enter(&ptgt->tgt_mutex);
14825 	}
14826 
14827 
14828 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
14829 		rval = EBUSY;
14830 	} else {
14831 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
14832 		    FCP_TGT_NODE_PRESENT) {
14833 			rval = 0;
14834 		}
14835 	}
14836 
14837 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14838 	    FCP_BUF_LEVEL_3, 0,
14839 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
14840 	    "lcount=%x::port_link_cnt=%x, "
14841 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14842 	    "tgt_tmp_cnt =%x",
14843 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14844 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14845 	    ptgt->tgt_tmp_cnt);
14846 
14847 	if (rval) {
14848 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14849 		    fcp_enable_auto_configuration && old_manual) {
14850 			ptgt->tgt_manual_config_only = 1;
14851 		}
14852 		mutex_exit(&ptgt->tgt_mutex);
14853 		mutex_exit(&pptr->port_mutex);
14854 		kmem_free(devlist, sizeof (*devlist));
14855 
14856 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14857 		    FCP_BUF_LEVEL_3, 0,
14858 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
14859 		    "lcount=%x::port_link_cnt=%x, "
14860 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
14861 		    "tgt_device_created=%x, tgt D_ID=%x",
14862 		    ntries, ptgt, lcount, pptr->port_link_cnt,
14863 		    tcount, ptgt->tgt_change_cnt, rval,
14864 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
14865 		return (rval);
14866 	}
14867 
14868 	if ((plun = ptgt->tgt_lun) != NULL) {
14869 		tcount = plun->lun_tgt->tgt_change_cnt;
14870 	} else {
14871 		rval = EINVAL;
14872 	}
14873 	lcount = pptr->port_link_cnt;
14874 
14875 	/*
14876 	 * Configuring the target with no LUNs will fail. We
14877 	 * should reset the node state so that it is not
14878 	 * automatically configured when the LUNs are added
14879 	 * to this target.
14880 	 */
14881 	if (ptgt->tgt_lun_cnt == 0) {
14882 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
14883 	}
14884 	mutex_exit(&ptgt->tgt_mutex);
14885 	mutex_exit(&pptr->port_mutex);
14886 
14887 	while (plun) {
14888 		child_info_t	*cip;
14889 
14890 		mutex_enter(&plun->lun_mutex);
14891 		cip = plun->lun_cip;
14892 		mutex_exit(&plun->lun_mutex);
14893 
14894 		mutex_enter(&ptgt->tgt_mutex);
14895 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
14896 			mutex_exit(&ptgt->tgt_mutex);
14897 
14898 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
14899 			    FCP_ONLINE, lcount, tcount,
14900 			    NDI_ONLINE_ATTACH);
14901 			if (rval != NDI_SUCCESS) {
14902 				FCP_TRACE(fcp_logq,
14903 				    pptr->port_instbuf, fcp_trace,
14904 				    FCP_BUF_LEVEL_3, 0,
14905 				    "fcp_create_on_demand: "
14906 				    "pass_to_hp_and_wait failed "
14907 				    "rval=%x", rval);
14908 				rval = EIO;
14909 			} else {
14910 				mutex_enter(&LUN_TGT->tgt_mutex);
14911 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
14912 				    FCP_LUN_BUSY);
14913 				mutex_exit(&LUN_TGT->tgt_mutex);
14914 			}
14915 			mutex_enter(&ptgt->tgt_mutex);
14916 		}
14917 
14918 		plun = plun->lun_next;
14919 		mutex_exit(&ptgt->tgt_mutex);
14920 	}
14921 
14922 	kmem_free(devlist, sizeof (*devlist));
14923 
14924 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14925 	    fcp_enable_auto_configuration && old_manual) {
14926 		mutex_enter(&ptgt->tgt_mutex);
14927 		/* if successful then set manual to 0 */
14928 		if (rval == 0) {
14929 			ptgt->tgt_manual_config_only = 0;
14930 		} else {
14931 			/* reset to 1 so the user has to do the config */
14932 			ptgt->tgt_manual_config_only = 1;
14933 		}
14934 		mutex_exit(&ptgt->tgt_mutex);
14935 	}
14936 
14937 	return (rval);
14938 }
14939 
14940 
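/*
 *     Function: fcp_ascii_to_wwn
 *
 *  Description: Converts an ASCII hexadecimal string into a byte array of at
 *		 most byte_len bytes.  For example, the (hypothetical) string
 *		 "210000e08b049a3f" yields the eight bytes
 *		 0x21 0x00 0x00 0xe0 0x8b 0x04 0x9a 0x3f.
 *
 *     Argument: string		ASCII string to convert.
 *		 bytes		Output byte array.
 *		 byte_len	Size of the output array.
 *
 * Return Value: None
 */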
14941 static void
14942 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
14943 {
14944 	int		count;
14945 	uchar_t		byte;
14946 
14947 	count = 0;
14948 	while (*string) {
14949 		byte = FCP_ATOB(*string); string++;
14950 		byte = byte << 4 | FCP_ATOB(*string); string++;
14951 		bytes[count++] = byte;
14952 
14953 		if (count >= byte_len) {
14954 			break;
14955 		}
14956 	}
14957 }
14958 
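/*
 *     Function: fcp_wwn_to_ascii
 *
 *  Description: Formats a FC_WWN_SIZE byte WWN as a lowercase hexadecimal
 *		 ASCII string.  The output buffer must be able to hold at
 *		 least (FC_WWN_SIZE * 2) + 1 bytes.
 *
 *     Argument: wwn		WWN to convert.
 *		 string		Output buffer.
 *
 * Return Value: None
 */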
14959 static void
14960 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
14961 {
14962 	int		i;
14963 
14964 	for (i = 0; i < FC_WWN_SIZE; i++) {
14965 		(void) sprintf(string + (i * 2),
14966 		    "%02x", wwn[i]);
14967 	}
14968 
14969 }
14970 
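/*
 *     Function: fcp_print_error
 *
 *  Description: Logs a warning describing a failed internal packet (REPORT
 *		 LUN, INQUIRY, INQUIRY page 0x83, PLOGI or PRLI).  For SCSI
 *		 commands that completed at the transport level, the FCP
 *		 response and any sense data are decoded; otherwise the
 *		 transport state and reason are reported.
 *
 *     Argument: *fpkt		fc_packet_t of the failed internal command.
 *
 * Return Value: None
 */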
14971 static void
14972 fcp_print_error(fc_packet_t *fpkt)
14973 {
14974 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
14975 	    fpkt->pkt_ulp_private;
14976 	struct fcp_port	*pptr;
14977 	struct fcp_tgt	*ptgt;
14978 	struct fcp_lun	*plun;
14979 	caddr_t			buf;
14980 	int			scsi_cmd = 0;
14981 
14982 	ptgt = icmd->ipkt_tgt;
14983 	plun = icmd->ipkt_lun;
14984 	pptr = ptgt->tgt_port;
14985 
14986 	buf = kmem_zalloc(256, KM_NOSLEEP);
14987 	if (buf == NULL) {
14988 		return;
14989 	}
14990 
14991 	switch (icmd->ipkt_opcode) {
14992 	case SCMD_REPORT_LUN:
14993 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
14994 		    " lun=0x%%x failed");
14995 		scsi_cmd++;
14996 		break;
14997 
14998 	case SCMD_INQUIRY_PAGE83:
14999 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15000 		    " lun=0x%%x failed");
15001 		scsi_cmd++;
15002 		break;
15003 
15004 	case SCMD_INQUIRY:
15005 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15006 		    " lun=0x%%x failed");
15007 		scsi_cmd++;
15008 		break;
15009 
15010 	case LA_ELS_PLOGI:
15011 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15012 		break;
15013 
15014 	case LA_ELS_PRLI:
15015 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15016 		break;
15017 	}
15018 
15019 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15020 		struct fcp_rsp		response, *rsp;
15021 		uchar_t			asc, ascq;
15022 		caddr_t			sense_key = NULL;
15023 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15024 
15025 		if (icmd->ipkt_nodma) {
15026 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15027 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15028 			    sizeof (struct fcp_rsp));
15029 		} else {
15030 			rsp = &response;
15031 			bep = &fcp_rsp_err;
15032 
15033 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15034 			    sizeof (struct fcp_rsp));
15035 
15036 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15037 			    bep, fpkt->pkt_resp_acc,
15038 			    sizeof (struct fcp_rsp_info));
15039 		}
15040 
15041 
15042 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15043 			(void) sprintf(buf + strlen(buf),
15044 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15045 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15046 			    " senselen=%%x. Giving up");
15047 
15048 			fcp_log(CE_WARN, pptr->port_dip, buf,
15049 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15050 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15051 			    rsp->fcp_u.fcp_status.reserved_1,
15052 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15053 
15054 			kmem_free(buf, 256);
15055 			return;
15056 		}
15057 
15058 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15059 		    bep->rsp_code != FCP_NO_FAILURE) {
15060 			(void) sprintf(buf + strlen(buf),
15061 			    " FCP Response code = 0x%x", bep->rsp_code);
15062 		}
15063 
15064 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15065 			struct scsi_extended_sense sense_info, *sense_ptr;
15066 
15067 			if (icmd->ipkt_nodma) {
15068 				sense_ptr = (struct scsi_extended_sense *)
15069 				    ((caddr_t)fpkt->pkt_resp +
15070 				    sizeof (struct fcp_rsp) +
15071 				    rsp->fcp_response_len);
15072 			} else {
15073 				sense_ptr = &sense_info;
15074 
15075 				FCP_CP_IN(fpkt->pkt_resp +
15076 				    sizeof (struct fcp_rsp) +
15077 				    rsp->fcp_response_len, &sense_info,
15078 				    fpkt->pkt_resp_acc,
15079 				    sizeof (struct scsi_extended_sense));
15080 			}
15081 
15082 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15083 			    NUM_IMPL_SENSE_KEYS) {
15084 				sense_key = sense_keys[sense_ptr->es_key];
15085 			} else {
15086 				sense_key = "Undefined";
15087 			}
15088 
15089 			asc = sense_ptr->es_add_code;
15090 			ascq = sense_ptr->es_qual_code;
15091 
15092 			(void) sprintf(buf + strlen(buf),
15093 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15094 			    " Giving up");
15095 
15096 			fcp_log(CE_WARN, pptr->port_dip, buf,
15097 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15098 			    asc, ascq);
15099 		} else {
15100 			(void) sprintf(buf + strlen(buf),
15101 			    " : SCSI status=%%x. Giving up");
15102 
15103 			fcp_log(CE_WARN, pptr->port_dip, buf,
15104 			    ptgt->tgt_d_id, plun->lun_num,
15105 			    rsp->fcp_u.fcp_status.scsi_status);
15106 		}
15107 	} else {
15108 		caddr_t state, reason, action, expln;
15109 
15110 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15111 		    &action, &expln);
15112 
15113 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15114 		    " Reason:%%s. Giving up");
15115 
15116 		if (scsi_cmd) {
15117 			fcp_log(CE_WARN, pptr->port_dip, buf,
15118 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15119 		} else {
15120 			fcp_log(CE_WARN, pptr->port_dip, buf,
15121 			    ptgt->tgt_d_id, state, reason);
15122 		}
15123 	}
15124 
15125 	kmem_free(buf, 256);
15126 }
15127 
15128 
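/*
 *     Function: fcp_handle_ipkt_errors
 *
 *  Description: Decides what to do with an internal packet that failed with
 *		 the transport error rval: busy-type errors are requeued for
 *		 retry (when the packet is still retryable), a PRLI that
 *		 fails with FC_LOGINREQ is turned into a PLOGI, and any other
 *		 error is logged unless a state change occurred in the
 *		 meantime.
 *
 *     Argument: *pptr		FCP port.
 *		 *ptgt		Target the packet was sent to.
 *		 *icmd		Internal packet that failed.
 *		 rval		Transport error code.
 *		 op		Name of the operation, used for logging.
 *
 * Return Value: DDI_SUCCESS	The packet was requeued or reissued.
 *		 DDI_FAILURE	The packet was given up on.
 */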
15129 static int
15130 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15131     struct fcp_ipkt *icmd, int rval, caddr_t op)
15132 {
15133 	int	ret = DDI_FAILURE;
15134 	char	*error;
15135 
15136 	switch (rval) {
15137 	case FC_DEVICE_BUSY_NEW_RSCN:
15138 		/*
15139 		 * This means that there was a new RSCN that the transport
15140 		 * knows about (which the ULP *may* know about too) but the
15141 		 * pkt that was sent down was related to an older RSCN. So, we
15142 		 * are just going to reset the retry count and deadline and
15143 		 * continue to retry. The idea is that transport is currently
15144 		 * working on the new RSCN and will soon let the ULPs know
15145 		 * about it and when it does the existing logic will kick in
15146 		 * where it will change the tcount to indicate that something
15147 		 * changed on the target. So, rediscovery will start and there
15148 		 * will not be an infinite retry.
15149 		 *
15150 		 * For a full flow of how the RSCN info is transferred back and
15151 		 * forth, see fp.c
15152 		 */
15153 		icmd->ipkt_retries = 0;
15154 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15155 		    FCP_ICMD_DEADLINE;
15156 
15157 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15158 		    FCP_BUF_LEVEL_3, 0,
15159 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15160 		    rval, ptgt->tgt_d_id);
15161 		/* FALLTHROUGH */
15162 
15163 	case FC_STATEC_BUSY:
15164 	case FC_DEVICE_BUSY:
15165 	case FC_PBUSY:
15166 	case FC_FBUSY:
15167 	case FC_TRAN_BUSY:
15168 	case FC_OFFLINE:
15169 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15170 		    FCP_BUF_LEVEL_3, 0,
15171 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15172 		    rval, ptgt->tgt_d_id);
15173 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15174 		    fcp_is_retryable(icmd)) {
15175 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15176 			ret = DDI_SUCCESS;
15177 		}
15178 		break;
15179 
15180 	case FC_LOGINREQ:
15181 		/*
15182 		 * FC_LOGINREQ used to be handled just like all the cases
15183 		 * above. It has been changed to handled a PRLI that fails
15184 		 * with FC_LOGINREQ different than other ipkts that fail
15185 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15186 		 * a simple matter to turn it into a PLOGI instead, so that's
15187 		 * exactly what we do here.
15188 		 */
15189 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15190 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15191 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15192 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15193 		} else {
15194 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15195 			    FCP_BUF_LEVEL_3, 0,
15196 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15197 			    rval, ptgt->tgt_d_id);
15198 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15199 			    fcp_is_retryable(icmd)) {
15200 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15201 				ret = DDI_SUCCESS;
15202 			}
15203 		}
15204 		break;
15205 
15206 	default:
15207 		mutex_enter(&pptr->port_mutex);
15208 		mutex_enter(&ptgt->tgt_mutex);
15209 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15210 			mutex_exit(&ptgt->tgt_mutex);
15211 			mutex_exit(&pptr->port_mutex);
15212 
15213 			(void) fc_ulp_error(rval, &error);
15214 			fcp_log(CE_WARN, pptr->port_dip,
15215 			    "!Failed to send %s to D_ID=%x error=%s",
15216 			    op, ptgt->tgt_d_id, error);
15217 		} else {
15218 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15219 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15220 			    "fcp_handle_ipkt_errors,1: state change occured"
15221 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15222 			mutex_exit(&ptgt->tgt_mutex);
15223 			mutex_exit(&pptr->port_mutex);
15224 		}
15225 		break;
15226 	}
15227 
15228 	return (ret);
15229 }
15230 
15231 
15232 /*
15233  * Check of outstanding commands on any LUN for this target
15234  */
15235 static int
15236 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15237 {
15238 	struct	fcp_lun	*plun;
15239 	struct	fcp_pkt	*cmd;
15240 
15241 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15242 		mutex_enter(&plun->lun_mutex);
15243 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15244 		    cmd = cmd->cmd_forw) {
15245 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15246 				mutex_exit(&plun->lun_mutex);
15247 				return (FC_SUCCESS);
15248 			}
15249 		}
15250 		mutex_exit(&plun->lun_mutex);
15251 	}
15252 
15253 	return (FC_FAILURE);
15254 }
15255 
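/*
 *     Function: fcp_construct_map
 *
 *  Description: Builds an array of fc_portmap_t entries describing every
 *		 non-orphan target known to the port.  The caller is
 *		 responsible for freeing the returned array.
 *
 *     Argument: *pptr		FCP port.
 *		 *dev_cnt	Set to the number of entries in the array.
 *
 * Return Value: Pointer to the allocated array, or NULL if the allocation
 *		 failed.
 */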
15256 static fc_portmap_t *
15257 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15258 {
15259 	int			i;
15260 	fc_portmap_t		*devlist;
15261 	fc_portmap_t		*devptr = NULL;
15262 	struct fcp_tgt	*ptgt;
15263 
15264 	mutex_enter(&pptr->port_mutex);
15265 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15266 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15267 		    ptgt = ptgt->tgt_next) {
15268 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15269 				++*dev_cnt;
15270 			}
15271 		}
15272 	}
15273 
15274 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15275 	    KM_NOSLEEP);
15276 	if (devlist == NULL) {
15277 		mutex_exit(&pptr->port_mutex);
15278 		fcp_log(CE_WARN, pptr->port_dip,
15279 		    "!fcp%d: failed to allocate for portmap for construct map",
15280 		    pptr->port_instance);
15281 		return (devptr);
15282 	}
15283 
15284 	for (i = 0; i < FCP_NUM_HASH; i++) {
15285 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15286 		    ptgt = ptgt->tgt_next) {
15287 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15288 				int ret;
15289 
15290 				ret = fc_ulp_pwwn_to_portmap(
15291 				    pptr->port_fp_handle,
15292 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15293 				    devlist);
15294 
15295 				if (ret == FC_SUCCESS) {
15296 					devlist++;
15297 					continue;
15298 				}
15299 
15300 				devlist->map_pd = NULL;
15301 				devlist->map_did.port_id = ptgt->tgt_d_id;
15302 				devlist->map_hard_addr.hard_addr =
15303 				    ptgt->tgt_hard_addr;
15304 
15305 				devlist->map_state = PORT_DEVICE_INVALID;
15306 				devlist->map_type = PORT_DEVICE_OLD;
15307 
15308 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15309 				    &devlist->map_nwwn, FC_WWN_SIZE);
15310 
15311 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15312 				    &devlist->map_pwwn, FC_WWN_SIZE);
15313 
15314 				devlist++;
15315 			}
15316 		}
15317 	}
15318 
15319 	mutex_exit(&pptr->port_mutex);
15320 
15321 	return (devptr);
15322 }
15323 /*
15324  * Inimate MPxIO that the lun is busy and cannot accept regular IO
15325  */
15326 static void
15327 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15328 {
15329 	int i;
15330 	struct fcp_tgt	*ptgt;
15331 	struct fcp_lun	*plun;
15332 
15333 	for (i = 0; i < FCP_NUM_HASH; i++) {
15334 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15335 		    ptgt = ptgt->tgt_next) {
15336 			mutex_enter(&ptgt->tgt_mutex);
15337 			for (plun = ptgt->tgt_lun; plun != NULL;
15338 			    plun = plun->lun_next) {
15339 				if (plun->lun_mpxio &&
15340 				    plun->lun_state & FCP_LUN_BUSY) {
15341 					if (!fcp_pass_to_hp(pptr, plun,
15342 					    plun->lun_cip,
15343 					    FCP_MPXIO_PATH_SET_BUSY,
15344 					    pptr->port_link_cnt,
15345 					    ptgt->tgt_change_cnt, 0, 0)) {
15346 						FCP_TRACE(fcp_logq,
15347 						    pptr->port_instbuf,
15348 						    fcp_trace,
15349 						    FCP_BUF_LEVEL_2, 0,
15350 						    "path_verifybusy: "
15351 						    "disable lun %p failed!",
15352 						    plun);
15353 					}
15354 				}
15355 			}
15356 			mutex_exit(&ptgt->tgt_mutex);
15357 		}
15358 	}
15359 }
15360 
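/*
 *     Function: fcp_update_mpxio_path
 *
 *  Description: Enables (FCP_MPXIO_PATH_CLEAR_BUSY) or disables (any other
 *		 value of what, typically FCP_MPXIO_PATH_SET_BUSY) the MPxIO
 *		 path of a LUN.
 *
 *     Argument: *plun		LUN whose path is updated.
 *		 *cip		Child (path) information.
 *		 what		FCP_MPXIO_PATH_CLEAR_BUSY or
 *				FCP_MPXIO_PATH_SET_BUSY.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE (child not present).
 */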
15361 static int
15362 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15363 {
15364 	dev_info_t		*cdip = NULL;
15365 	dev_info_t		*pdip = NULL;
15366 
15367 	ASSERT(plun);
15368 
15369 	mutex_enter(&plun->lun_mutex);
15370 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15371 		mutex_exit(&plun->lun_mutex);
15372 		return (NDI_FAILURE);
15373 	}
15374 	mutex_exit(&plun->lun_mutex);
15375 	cdip = mdi_pi_get_client(PIP(cip));
15376 	pdip = mdi_pi_get_phci(PIP(cip));
15377 
15378 	ASSERT(cdip != NULL);
15379 	ASSERT(pdip != NULL);
15380 
15381 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15382 		/* LUN ready for IO */
15383 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15384 	} else {
		/* LUN is busy; do not accept I/O */
15386 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15387 	}
15388 	return (NDI_SUCCESS);
15389 }
15390 
15391 /*
15392  * Caller must free the returned string of MAXPATHLEN len
15393  * If the device is offline (-1 instance number) NULL
15394  * will be returned.
15395  */
15396 static char *
15397 fcp_get_lun_path(struct fcp_lun *plun) {
15398 	dev_info_t	*dip = NULL;
15399 	char	*path = NULL;
15400 	if (plun == NULL) {
15401 		return (NULL);
15402 	}
15403 	if (plun->lun_mpxio == 0) {
15404 		dip = DIP(plun->lun_cip);
15405 	} else {
15406 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15407 	}
15408 	if (dip == NULL) {
15409 		return (NULL);
15410 	}
15411 	if (ddi_get_instance(dip) < 0) {
15412 		return (NULL);
15413 	}
15414 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15415 	if (path == NULL) {
15416 		return (NULL);
15417 	}
15418 
15419 	(void) ddi_pathname(dip, path);
15420 	/*
15421 	 * In reality, the user wants a fully valid path (one they can open)
15422 	 * but this string is lacking the mount point, and the minor node.
15423 	 * It would be nice if we could "figure these out" somehow
15424 	 * and fill them in.  Otherwise, the userland code has to understand
15425 	 * driver specific details of which minor node is the "best" or
15426 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15427 	 * which tape doesn't rewind)
15428 	 */
15429 	return (path);
15430 }
15431 
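/*
 *     Function: fcp_scsi_bus_config
 *
 *  Description: bus_config entry point.  For BUS_CONFIG_ONE the operation is
 *		 retried (up to fcp_max_bus_config_retries times) to give
 *		 fabric devices a chance to show up; for BUS_CONFIG_DRIVER
 *		 and BUS_CONFIG_ALL the call is delayed until all devices
 *		 have reported in or FCP_INIT_WAIT_TIMEOUT has expired.  In
 *		 all cases the devinfo taskq is drained before the final
 *		 ndi_busop_bus_config() call so that child nodes exist.
 */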
15432 static int
15433 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15434     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15435 {
15436 	int64_t reset_delay;
15437 	int rval, retry = 0;
15438 	struct fcp_port *pptr = fcp_dip2port(parent);
15439 
15440 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15441 	    (lbolt64 - pptr->port_attach_time);
15442 	if (reset_delay < 0) {
15443 		reset_delay = 0;
15444 	}
15445 
15446 	if (fcp_bus_config_debug) {
15447 		flag |= NDI_DEVI_DEBUG;
15448 	}
15449 
15450 	switch (op) {
15451 	case BUS_CONFIG_ONE:
15452 		/*
15453 		 * Retry the command since we need to ensure
15454 		 * the fabric devices are available for root
15455 		 */
15456 		while (retry++ < fcp_max_bus_config_retries) {
15457 			rval =	(ndi_busop_bus_config(parent,
15458 			    flag | NDI_MDI_FALLBACK, op,
15459 			    arg, childp, (clock_t)reset_delay));
15460 			if (rval == 0) {
15461 				return (rval);
15462 			}
15463 		}
15464 
15465 		/*
15466 		 * drain taskq to make sure nodes are created and then
15467 		 * try again.
15468 		 */
15469 		taskq_wait(DEVI(parent)->devi_taskq);
15470 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15471 		    op, arg, childp, 0));
15472 
15473 	case BUS_CONFIG_DRIVER:
15474 	case BUS_CONFIG_ALL: {
15475 		/*
15476 		 * delay till all devices report in (port_tmp_cnt == 0)
15477 		 * or FCP_INIT_WAIT_TIMEOUT
15478 		 */
15479 		mutex_enter(&pptr->port_mutex);
15480 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15481 			(void) cv_timedwait(&pptr->port_config_cv,
15482 			    &pptr->port_mutex,
15483 			    ddi_get_lbolt() + (clock_t)reset_delay);
15484 			reset_delay =
15485 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15486 			    (lbolt64 - pptr->port_attach_time);
15487 		}
15488 		mutex_exit(&pptr->port_mutex);
15489 		/* drain taskq to make sure nodes are created */
15490 		taskq_wait(DEVI(parent)->devi_taskq);
15491 		return (ndi_busop_bus_config(parent, flag, op,
15492 		    arg, childp, 0));
15493 	}
15494 
15495 	default:
15496 		return (NDI_FAILURE);
15497 	}
15498 	/*NOTREACHED*/
15499 }
15500 
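/*
 *     Function: fcp_scsi_bus_unconfig
 *
 *  Description: bus_unconfig entry point; thin wrapper around
 *		 ndi_busop_bus_unconfig().
 */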
15501 static int
15502 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15503     ddi_bus_config_op_t op, void *arg)
15504 {
15505 	if (fcp_bus_config_debug) {
15506 		flag |= NDI_DEVI_DEBUG;
15507 	}
15508 
15509 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15510 }
15511 
15512 
15513 /*
15514  * Routine to copy GUID into the lun structure.
15515  * returns 0 if copy was successful and 1 if encountered a
15516  * failure and did not copy the guid.
15517  */
15518 static int
15519 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15520 {
15521 
15522 	int retval = 0;
15523 
15524 	/* add one for the null terminator */
15525 	const unsigned int len = strlen(guidp) + 1;
15526 
15527 	if ((guidp == NULL) || (plun == NULL)) {
15528 		return (1);
15529 	}
15530 
15531 	/*
15532 	 * if the plun->lun_guid already has been allocated,
15533 	 * then check the size. if the size is exact, reuse
15534 	 * it....if not free it an allocate the required size.
15535 	 * The reallocation should NOT typically happen
15536 	 * unless the GUIDs reported changes between passes.
15537 	 * We free up and alloc again even if the
15538 	 * size was more than required. This is due to the
15539 	 * fact that the field lun_guid_size - serves
15540 	 * dual role of indicating the size of the wwn
15541 	 * size and ALSO the allocation size.
15542 	 */
15543 	if (plun->lun_guid) {
15544 		if (plun->lun_guid_size != len) {
15545 			/*
15546 			 * free the allocated memory and
15547 			 * initialize the field
15548 			 * lun_guid_size to 0.
15549 			 */
15550 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15551 			plun->lun_guid = NULL;
15552 			plun->lun_guid_size = 0;
15553 		}
15554 	}
15555 	/*
15556 	 * alloc only if not already done.
15557 	 */
15558 	if (plun->lun_guid == NULL) {
15559 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15560 		if (plun->lun_guid == NULL) {
			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
			    "unable to allocate memory for GUID, size %d",
			    len);
15564 			retval = 1;
15565 		} else {
15566 			plun->lun_guid_size = len;
15567 		}
15568 	}
15569 	if (plun->lun_guid) {
15570 		/*
15571 		 * now copy the GUID
15572 		 */
15573 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15574 	}
15575 	return (retval);
15576 }
15577 
15578 /*
15579  * fcp_reconfig_wait
15580  *
15581  * Wait for a rediscovery/reconfiguration to complete before continuing.
15582  */
15583 
15584 static void
15585 fcp_reconfig_wait(struct fcp_port *pptr)
15586 {
15587 	clock_t		reconfig_start, wait_timeout;
15588 
15589 	/*
15590 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15591 	 * reconfiguration in progress.
15592 	 */
15593 
15594 	mutex_enter(&pptr->port_mutex);
15595 	if (pptr->port_tmp_cnt == 0) {
15596 		mutex_exit(&pptr->port_mutex);
15597 		return;
15598 	}
15599 	mutex_exit(&pptr->port_mutex);
15600 
15601 	/*
15602 	 * If we cause a reconfig by raising power, delay until all devices
15603 	 * report in (port_tmp_cnt returns to 0)
15604 	 */
15605 
15606 	reconfig_start = ddi_get_lbolt();
15607 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15608 
15609 	mutex_enter(&pptr->port_mutex);
15610 
15611 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15612 	    pptr->port_tmp_cnt) {
15613 
15614 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15615 		    reconfig_start + wait_timeout);
15616 	}
15617 
15618 	mutex_exit(&pptr->port_mutex);
15619 
15620 	/*
15621 	 * Even if fcp_tmp_count isn't 0, continue without error.  The port
15622 	 * we want may still be ok.  If not, it will error out later
15623 	 */
15624 }
15625 
15626 /*
15627  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15628  * We rely on the fcp_global_mutex to provide protection against changes to
15629  * the fcp_lun_blacklist.
15630  *
15631  * You can describe a list of target port WWNs and LUN numbers which will
15632  * not be configured. LUN numbers will be interpreted as decimal. White
15633  * spaces and ',' can be used in the list of LUN numbers.
15634  *
15635  * To prevent LUNs 1 and 2 from being configured for target
15636  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15637  *
15638  * pwwn-lun-blacklist=
15639  * "510000f010fd92a1,1,2",
15640  * "510000e012079df1,1,2";
15641  */
15642 static void
15643 fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist)
{
15645 	char **prop_array	= NULL;
15646 	char *curr_pwwn		= NULL;
15647 	char *curr_lun		= NULL;
15648 	uint32_t prop_item	= 0;
15649 	int idx			= 0;
15650 	int len			= 0;
15651 
15652 	ASSERT(mutex_owned(&fcp_global_mutex));
15653 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15654 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15655 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15656 		return;
15657 	}
15658 
15659 	for (idx = 0; idx < prop_item; idx++) {
15660 
15661 		curr_pwwn = prop_array[idx];
15662 		while (*curr_pwwn == ' ') {
15663 			curr_pwwn++;
15664 		}
15665 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15666 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15667 			    ", please check.", curr_pwwn);
15668 			continue;
15669 		}
15670 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15671 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15672 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15673 			    ", please check.", curr_pwwn);
15674 			continue;
15675 		}
15676 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15677 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15678 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15679 				    "blacklist, please check.", curr_pwwn);
15680 				break;
15681 			}
15682 		}
15683 		if (len != sizeof (la_wwn_t) * 2) {
15684 			continue;
15685 		}
15686 
15687 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15688 		*(curr_lun - 1) = '\0';
15689 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15690 	}
15691 
15692 	ddi_prop_free(prop_array);
15693 }
15694 
15695 /*
15696  * Get the masking info about one remote target port designated by wwn.
15697  * Lun ids could be separated by ',' or white spaces.
15698  */
15699 static void
15700 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
15702 	int		idx			= 0;
15703 	uint32_t	offset			= 0;
15704 	unsigned long	lun_id			= 0;
15705 	char		lunid_buf[16];
15706 	char		*pend			= NULL;
15707 	int		illegal_digit		= 0;
15708 
15709 	while (offset < strlen(curr_lun)) {
15710 		while ((curr_lun[offset + idx] != ',') &&
15711 		    (curr_lun[offset + idx] != '\0') &&
15712 		    (curr_lun[offset + idx] != ' ')) {
15713 			if (isdigit(curr_lun[offset + idx]) == 0) {
15714 				illegal_digit++;
15715 			}
15716 			idx++;
15717 		}
15718 		if (illegal_digit > 0) {
15719 			offset += (idx+1);	/* To the start of next lun */
15720 			idx = 0;
15721 			illegal_digit = 0;
15722 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15723 			    "the blacklist, please check digits.",
15724 			    curr_lun, curr_pwwn);
15725 			continue;
15726 		}
15727 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15728 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15729 			    "the blacklist, please check the length of LUN#.",
15730 			    curr_lun, curr_pwwn);
15731 			break;
15732 		}
		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}
15737 
15738 		bcopy(curr_lun + offset, lunid_buf, idx);
15739 		lunid_buf[idx] = '\0';
15740 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15741 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15742 		} else {
15743 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15744 			    "the blacklist, please check %s.",
15745 			    curr_lun, curr_pwwn, lunid_buf);
15746 		}
15747 		offset += (idx+1);	/* To the start of next lun */
15748 		idx = 0;
15749 	}
15750 }
15751 
15752 /*
15753  * Add one masking record
15754  */
15755 static void
15756 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
    struct fcp_black_list_entry **pplun_blacklist)
{
15758 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15759 	struct fcp_black_list_entry	*new_entry	= NULL;
15760 	la_wwn_t			wwn;
15761 
15762 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15763 	while (tmp_entry) {
15764 		if ((bcmp(&tmp_entry->wwn, &wwn,
15765 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15766 			return;
15767 		}
15768 
15769 		tmp_entry = tmp_entry->next;
15770 	}
15771 
15772 	/* add to black list */
	new_entry = kmem_zalloc(sizeof (struct fcp_black_list_entry),
	    KM_SLEEP);
15775 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15776 	new_entry->lun = lun_id;
15777 	new_entry->masked = 0;
15778 	new_entry->next = *pplun_blacklist;
15779 	*pplun_blacklist = new_entry;
15780 }
15781 
15782 /*
15783  * Check if we should mask the specified lun of this fcp_tgt
15784  */
15785 static int
fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
{
15787 	struct fcp_black_list_entry *remote_port;
15788 
15789 	remote_port = fcp_lun_blacklist;
15790 	while (remote_port != NULL) {
15791 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
15792 			if (remote_port->lun == lun_id) {
15793 				remote_port->masked++;
15794 				if (remote_port->masked == 1) {
15795 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
15796 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
15797 					    "is masked due to black listing.\n",
15798 					    lun_id, wwn->raw_wwn[0],
15799 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
15800 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
15801 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
15802 					    wwn->raw_wwn[7]);
15803 				}
15804 				return (TRUE);
15805 			}
15806 		}
15807 		remote_port = remote_port->next;
15808 	}
15809 	return (FALSE);
15810 }
15811 
15812 /*
15813  * Release all allocated resources
15814  */
15815 static void
fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
{
15817 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15818 	struct fcp_black_list_entry	*current_entry	= NULL;
15819 
15820 	ASSERT(mutex_owned(&fcp_global_mutex));
15821 	/*
15822 	 * Traverse all luns
15823 	 */
15824 	while (tmp_entry) {
15825 		current_entry = tmp_entry;
15826 		tmp_entry = tmp_entry->next;
15827 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
15828 	}
15829 	*pplun_blacklist = NULL;
15830 }
15831