1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.  To help explain
57  * that function, a flow diagram is given here.  The diagram doesn't claim
58  * to cover all the cases and events that can occur during the discovery
59  * process, nor the subtleties of the code; the code paths shown are
60  * simplified.  Its purpose is to give the reader (and potential bug fixer)
61  * an overall view of the logic of the code.  For that reason the diagram
62  * covers the simple case of the link coming up cleanly, or of a new port
63  * attaching to FCP while the link is up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The link can go down and come back up at any time during the
73  *	  discovery process, which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433     int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435     uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439     uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446     int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449     uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451     uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461     uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463     int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466     int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470     int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474     int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476     int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478     int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480     int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486     fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490     struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496     fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500     int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503     uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505     int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508     int);
509 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512     int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516     int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520     int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526     int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530     int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532     dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534     int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536     int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541     struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544     uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546     struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551     child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554     int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557     int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559     struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561     uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581     uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585     struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588     int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 
596 /*
597  * New functions added for mpxio support
598  */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602     int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604     dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610     int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612     fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 
615 /*
616  * New functions added for lun masking support
617  */
618 static void fcp_read_blacklist(dev_info_t *dip,
619     struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621     struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 
627 extern struct mod_ops	mod_driverops;
628 /*
629  * This variable is defined in modctl.c and set to '1' after the root driver
630  * and fs are loaded.  It serves as an indication that the root filesystem can
631  * be used.
632  */
633 extern int		modrootloaded;
634 /*
635  * This table contains strings associated with the SCSI sense key codes.  It
636  * is used by FCP to print a clear explanation of the code returned in the
637  * sense information by a device.
638  */
639 extern char		*sense_keys[];
640 /*
641  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
642  * under this device that the paths to a physical device are created when
643  * MPxIO is used.
644  */
645 extern dev_info_t	*scsi_vhci_dip;
646 
647 /*
648  * Report lun processing
649  */
650 #define	FCP_LUN_ADDRESSING		0x80
651 #define	FCP_PD_ADDRESSING		0x00
652 #define	FCP_VOLUME_ADDRESSING		0x40
653 
654 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
655 #define	MAX_INT_DMA			0x7fffffff
656 #define	FCP_MAX_SENSE_LEN		252
657 #define	FCP_MAX_RESPONSE_LEN		0xffffff
658 /*
659  * Property definitions
660  */
661 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
662 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
663 #define	TARGET_PROP	(char *)fcp_target_prop
664 #define	LUN_PROP	(char *)fcp_lun_prop
665 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
666 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
667 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
668 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
669 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
670 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
671 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
672 /*
673  * Short hand macros.
674  */
675 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
676 #define	LUN_TGT		(plun->lun_tgt)
677 
678 /*
679  * Driver private macros
680  */
681 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
682 			((x) >= 'a' && (x) <= 'f') ?			\
683 			((x) - 'a' + 10) : ((x) - 'A' + 10))
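
/*
 * Illustrative use of FCP_ATOB (a sketch only, using hypothetical "str"
 * and "byte"): two ASCII hex digits can be combined into one byte, as is
 * done when an ASCII WWN string is converted to its binary form:
 *
 *	byte = (FCP_ATOB(str[0]) << 4) | FCP_ATOB(str[1]);
 */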
684 
685 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
686 
687 #define	FCP_N_NDI_EVENTS						\
688 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
689 
690 #define	FCP_LINK_STATE_CHANGED(p, c)			\
691 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
692 
693 #define	FCP_TGT_STATE_CHANGED(t, c)			\
694 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
695 
696 #define	FCP_STATE_CHANGED(p, t, c)		\
697 	(FCP_TGT_STATE_CHANGED(t, c))
698 
699 #define	FCP_MUST_RETRY(fpkt)				\
700 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
701 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
702 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
703 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
704 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
705 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
706 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
707 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
708 
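/*
 * Sense data: UNIT ATTENTION with ASC/ASCQ 0x3f/0x0e, i.e. "reported
 * LUNs data has changed".
 */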
709 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
710 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
711 	(es)->es_add_code == 0x3f &&		\
712 	(es)->es_qual_code == 0x0e)
713 
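/*
 * Sense data: ILLEGAL REQUEST with ASC/ASCQ 0x25/0x00, i.e. "logical
 * unit not supported".
 */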
714 #define	FCP_SENSE_NO_LUN(es)			\
715 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
716 	(es)->es_add_code == 0x25 &&		\
717 	(es)->es_qual_code == 0x0)
718 
719 #define	FCP_VERSION		"1.188"
720 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
721 
722 #define	FCP_NUM_ELEMENTS(array)			\
723 	(sizeof (array) / sizeof ((array)[0]))
724 
725 /*
726  * Debugging, Error reporting, and tracing
727  */
728 #define	FCP_LOG_SIZE		(1024 * 1024)
729 
730 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
731 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
732 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
733 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
734 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
735 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
736 #define	FCP_LEVEL_7		0x00040
737 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
738 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
739 
740 
741 
742 /*
743  * Log contents to system messages file
744  */
745 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
746 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
747 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
748 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
749 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
750 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
751 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
752 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
753 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
754 
755 
756 /*
757  * Log contents to trace buffer
758  */
759 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
760 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
761 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
762 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
763 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
764 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
765 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
766 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
767 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
768 
769 
770 /*
771  * Log contents to both system messages file and trace buffer
772  */
773 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
774 				FC_TRACE_LOG_MSG)
775 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
776 				FC_TRACE_LOG_MSG)
777 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
778 				FC_TRACE_LOG_MSG)
779 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
780 				FC_TRACE_LOG_MSG)
781 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
782 				FC_TRACE_LOG_MSG)
783 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
784 				FC_TRACE_LOG_MSG)
785 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
786 				FC_TRACE_LOG_MSG)
787 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #ifdef DEBUG
792 #define	FCP_DTRACE	fc_trace_debug
793 #else
794 #define	FCP_DTRACE
795 #endif
796 
797 #define	FCP_TRACE	fc_trace_debug
798 
799 static struct cb_ops fcp_cb_ops = {
800 	fcp_open,			/* open */
801 	fcp_close,			/* close */
802 	nodev,				/* strategy */
803 	nodev,				/* print */
804 	nodev,				/* dump */
805 	nodev,				/* read */
806 	nodev,				/* write */
807 	fcp_ioctl,			/* ioctl */
808 	nodev,				/* devmap */
809 	nodev,				/* mmap */
810 	nodev,				/* segmap */
811 	nochpoll,			/* chpoll */
812 	ddi_prop_op,			/* cb_prop_op */
813 	0,				/* streamtab */
814 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
815 	CB_REV,				/* rev */
816 	nodev,				/* aread */
817 	nodev				/* awrite */
818 };
819 
820 
821 static struct dev_ops fcp_ops = {
822 	DEVO_REV,
823 	0,
824 	ddi_getinfo_1to1,
825 	nulldev,		/* identify */
826 	nulldev,		/* probe */
827 	fcp_attach,		/* attach and detach are mandatory */
828 	fcp_detach,
829 	nodev,			/* reset */
830 	&fcp_cb_ops,		/* cb_ops */
831 	NULL,			/* bus_ops */
832 	NULL,			/* power */
833 };
834 
835 
836 char *fcp_version = FCP_NAME_VERSION;
837 
838 static struct modldrv modldrv = {
839 	&mod_driverops,
840 	FCP_NAME_VERSION,
841 	&fcp_ops
842 };
843 
844 
845 static struct modlinkage modlinkage = {
846 	MODREV_1,
847 	&modldrv,
848 	NULL
849 };
850 
851 
852 static fc_ulp_modinfo_t fcp_modinfo = {
853 	&fcp_modinfo,			/* ulp_handle */
854 	FCTL_ULP_MODREV_4,		/* ulp_rev */
855 	FC4_SCSI_FCP,			/* ulp_type */
856 	"fcp",				/* ulp_name */
857 	FCP_STATEC_MASK,		/* ulp_statec_mask */
858 	fcp_port_attach,		/* ulp_port_attach */
859 	fcp_port_detach,		/* ulp_port_detach */
860 	fcp_port_ioctl,			/* ulp_port_ioctl */
861 	fcp_els_callback,		/* ulp_els_callback */
862 	fcp_data_callback,		/* ulp_data_callback */
863 	fcp_statec_callback		/* ulp_statec_callback */
864 };
865 
866 #ifdef	DEBUG
867 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
868 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
869 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
870 				FCP_LEVEL_6 | FCP_LEVEL_7)
871 #else
872 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
873 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
874 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
875 				FCP_LEVEL_6 | FCP_LEVEL_7)
876 #endif
877 
878 /* FCP global variables */
879 int			fcp_bus_config_debug = 0;
880 static int		fcp_log_size = FCP_LOG_SIZE;
881 static int		fcp_trace = FCP_TRACE_DEFAULT;
882 static fc_trace_logq_t	*fcp_logq = NULL;
883 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
884 /*
885  * Auto-configuration is enabled by default.  The only way to disable it is
886  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
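 *
 * As an illustration only, auto-configuration could be turned off with an
 * fcp.conf entry of the form:
 *
 *	manual_configuration_only=1;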
887  */
888 static int		fcp_enable_auto_configuration = 1;
889 static int		fcp_max_bus_config_retries	= 4;
890 static int		fcp_lun_ready_retry = 300;
891 /*
892  * The value assigned to the following variable has changed several times due
893  * to a problem with the data-underrun reporting of some firmware.  The
894  * current value of 50 gives a timeout value of 25 seconds for a max number
895  * of 256 LUNs.
896  */
897 static int		fcp_max_target_retries = 50;
898 /*
899  * Watchdog variables
900  * ------------------
901  *
902  * fcp_watchdog_init
903  *
904  *	Indicates if the watchdog timer is running or not.  This is actually
905  *	a counter of the number of Fibre Channel ports that attached.  When
906  *	the first port attaches the watchdog is started.  When the last port
907  *	detaches the watchdog timer is stopped.
908  *
909  * fcp_watchdog_time
910  *
911  *	This is the watchdog clock counter.  It is incremented by
912  *	fcp_watchdog_timeout each time the watchdog timer expires.
913  *
914  * fcp_watchdog_timeout
915  *
916  *	The amount by which fcp_watchdog_time is incremented, as well as the
917  *	timeout value of the watchdog timer.  The unit is 1 second.  It is
918  *	strange that this is not a #define but a variable, since the code
919  *	never changes this value.  The unit can be said to be 1 second
920  *	because the number of ticks for the watchdog timer is determined
921  *	like this:
922  *
923  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
924  *				  drv_usectohz(1000000);
925  *
926  *	The value 1000000 is hard coded in the code.
927  *
928  * fcp_watchdog_tick
929  *
930  *	Watchdog timer value in ticks.
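 *
 *	A simplified sketch (assumed from the description above) of what
 *	happens on each expiration of the watchdog timer:
 *
 *	    fcp_watchdog_time += fcp_watchdog_timeout;
 *	    fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);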
931  */
932 static int		fcp_watchdog_init = 0;
933 static int		fcp_watchdog_time = 0;
934 static int		fcp_watchdog_timeout = 1;
935 static int		fcp_watchdog_tick;
936 
937 /*
938  * fcp_offline_delay is a global variable to enable customisation of
939  * the timeout on link offlines or RSCNs. The default value is set
940  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
941  * specified in FCP4 Chapter 11 (see www.t10.org).
942  *
943  * The variable fcp_offline_delay is specified in SECONDS.
944  *
945  * If we made this a static var then the user would not be able to
946  * change it. This variable is set in fcp_attach().
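 *
 * As an illustration only, the delay could be raised to 30 seconds with an
 * fcp.conf entry of the form:
 *
 *	fcp_offline_delay=30;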
947  */
948 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
949 
950 static void		*fcp_softstate = NULL; /* for soft state */
951 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
952 static kmutex_t		fcp_global_mutex;
953 static kmutex_t		fcp_ioctl_mutex;
954 static dev_info_t	*fcp_global_dip = NULL;
955 static timeout_id_t	fcp_watchdog_id;
956 const char		*fcp_lun_prop = "lun";
957 const char		*fcp_sam_lun_prop = "sam-lun";
958 const char		*fcp_target_prop = "target";
959 /*
960  * NOTE: consumers of "node-wwn" property include stmsboot in ON
961  * consolidation.
962  */
963 const char		*fcp_node_wwn_prop = "node-wwn";
964 const char		*fcp_port_wwn_prop = "port-wwn";
965 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
966 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
967 const char		*fcp_manual_config_only = "manual_configuration_only";
968 const char		*fcp_init_port_prop = "initiator-port";
969 const char		*fcp_tgt_port_prop = "target-port";
970 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
971 
972 static struct fcp_port	*fcp_port_head = NULL;
973 static ddi_eventcookie_t	fcp_insert_eid;
974 static ddi_eventcookie_t	fcp_remove_eid;
975 
976 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
977 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
978 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
979 };
980 
981 /*
982  * List of valid commands for the scsi_ioctl call
983  */
984 static uint8_t scsi_ioctl_list[] = {
985 	SCMD_INQUIRY,
986 	SCMD_REPORT_LUN,
987 	SCMD_READ_CAPACITY
988 };
989 
990 /*
991  * this is used to dummy up a report lun response for cases
992  * where the target doesn't support it
993  */
994 static uchar_t fcp_dummy_lun[] = {
995 	0x00,		/* MSB length (length = no of luns * 8) */
996 	0x00,
997 	0x00,
998 	0x08,		/* LSB length */
999 	0x00,		/* MSB reserved */
1000 	0x00,
1001 	0x00,
1002 	0x00,		/* LSB reserved */
1003 	FCP_PD_ADDRESSING,
1004 	0x00,		/* LUN is ZERO at the first level */
1005 	0x00,
1006 	0x00,		/* second level is zero */
1007 	0x00,
1008 	0x00,		/* third level is zero */
1009 	0x00,
1010 	0x00		/* fourth level is zero */
1011 };
1012 
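/*
 * Lookup table (presumably the standard FC-AL mapping) that converts an
 * arbitrated loop physical address (AL_PA) into the corresponding loop
 * (switch) ID.
 */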
1013 static uchar_t fcp_alpa_to_switch[] = {
1014 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1015 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1016 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1017 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1018 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1019 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1020 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1021 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1022 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1023 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1024 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1025 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1026 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1027 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1028 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1030 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1031 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1032 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1033 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1034 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1035 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1036 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1037 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038 };
1039 
1040 static caddr_t pid = "SESS01	      ";
1041 
1042 #if	!defined(lint)
1043 
1044 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1045     fcp_port::fcp_next fcp_watchdog_id))
1046 
1047 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1048 
1049 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1050     fcp_insert_eid
1051     fcp_remove_eid
1052     fcp_watchdog_time))
1053 
1054 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1055     fcp_cb_ops
1056     fcp_ops
1057     callb_cpr))
1058 
1059 #endif /* lint */
1060 
1061 /*
1062  * This table is used to determine whether or not it's safe to copy in
1063  * the target node name for a lun.  Since all luns behind the same target
1064  * have the same wwnn, only targets that do not support multiple luns are
1065  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1066  */
1067 
1068 char *fcp_symmetric_disk_table[] = {
1069 	"SEAGATE ST",
1070 	"IBM	 DDYFT",
1071 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1072 	"SUN	 SENA",		/* SES device */
1073 	"SUN	 SESS01"	/* VICOM SVE box */
1074 };
1075 
1076 int fcp_symmetric_disk_table_size =
1077 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1078 
1079 /*
1080  * The _init(9e) return value should be that of mod_install(9f). Under
1081  * some circumstances, a failure may not be related to mod_install(9f) and
1082  * one would then require a return value to indicate the failure. Looking
1083  * at mod_install(9f), it is expected to return 0 for success and non-zero
1084  * for failure. For device drivers, mod_install(9f) goes further down the
1085  * calling chain and ends up in ddi_installdrv(), whose return values are
1086  * DDI_SUCCESS and DDI_FAILURE.  There are also other functions in the
1087  * calling chain of mod_install(9f) which return values like EINVAL, and
1088  * some even return -1.
1089  *
1090  * To work around the vagaries of the mod_install() calling chain, return
1091  * either 0 or ENODEV depending on the success or failure of mod_install().
1092  */
1093 int
1094 _init(void)
1095 {
1096 	int rval;
1097 
1098 	/*
1099 	 * Initialize the soft state framework, so that ddi_soft_state_zalloc()
1100 	 * can be called later, before registering with the transport.
1101 	 */
1102 	if (ddi_soft_state_init(&fcp_softstate,
1103 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1104 		return (EINVAL);
1105 	}
1106 
1107 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1108 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1109 
1110 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1111 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1112 		mutex_destroy(&fcp_global_mutex);
1113 		mutex_destroy(&fcp_ioctl_mutex);
1114 		ddi_soft_state_fini(&fcp_softstate);
1115 		return (ENODEV);
1116 	}
1117 
1118 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1119 
1120 	if ((rval = mod_install(&modlinkage)) != 0) {
1121 		fc_trace_free_logq(fcp_logq);
1122 		(void) fc_ulp_remove(&fcp_modinfo);
1123 		mutex_destroy(&fcp_global_mutex);
1124 		mutex_destroy(&fcp_ioctl_mutex);
1125 		ddi_soft_state_fini(&fcp_softstate);
1126 		rval = ENODEV;
1127 	}
1128 
1129 	return (rval);
1130 }
1131 
1132 
1133 /*
1134  * the system is done with us as a driver, so clean up
1135  */
1136 int
1137 _fini(void)
1138 {
1139 	int rval;
1140 
1141 	/*
1142 	 * don't start cleaning up until we know that the module remove
1143 	 * has worked  -- if this works, then we know that each instance
1144 	 * has successfully been DDI_DETACHed
1145 	 */
1146 	if ((rval = mod_remove(&modlinkage)) != 0) {
1147 		return (rval);
1148 	}
1149 
1150 	(void) fc_ulp_remove(&fcp_modinfo);
1151 
1152 	ddi_soft_state_fini(&fcp_softstate);
1153 	mutex_destroy(&fcp_global_mutex);
1154 	mutex_destroy(&fcp_ioctl_mutex);
1155 	fc_trace_free_logq(fcp_logq);
1156 
1157 	return (rval);
1158 }
1159 
1160 
1161 int
1162 _info(struct modinfo *modinfop)
1163 {
1164 	return (mod_info(&modlinkage, modinfop));
1165 }
1166 
1167 
1168 /*
1169  * attach the module
1170  */
1171 static int
1172 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1173 {
1174 	int rval = DDI_SUCCESS;
1175 
1176 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1177 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1178 
1179 	if (cmd == DDI_ATTACH) {
1180 		/* The FCP pseudo device is created here. */
1181 		mutex_enter(&fcp_global_mutex);
1182 		fcp_global_dip = devi;
1183 		mutex_exit(&fcp_global_mutex);
1184 
1185 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1186 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1187 			ddi_report_dev(fcp_global_dip);
1188 		} else {
1189 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1190 			mutex_enter(&fcp_global_mutex);
1191 			fcp_global_dip = NULL;
1192 			mutex_exit(&fcp_global_mutex);
1193 
1194 			rval = DDI_FAILURE;
1195 		}
1196 		/*
1197 		 * We check the fcp_offline_delay property at this
1198 		 * point. This variable is global for the driver,
1199 		 * not specific to an instance.
1200 		 *
1201 		 * We do not recommend setting the value to less
1202 		 * than 10 seconds (RA_TOV_els), or greater than
1203 		 * 60 seconds.
1204 		 */
1205 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1206 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1207 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1208 		if ((fcp_offline_delay < 10) ||
1209 		    (fcp_offline_delay > 60)) {
1210 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1211 			    "to %d second(s). This is outside the "
1212 			    "recommended range of 10..60 seconds.",
1213 			    fcp_offline_delay);
1214 		}
1215 	}
1216 
1217 	return (rval);
1218 }
1219 
1220 
1221 /*ARGSUSED*/
1222 static int
1223 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1224 {
1225 	int	res = DDI_SUCCESS;
1226 
1227 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1228 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1229 
1230 	if (cmd == DDI_DETACH) {
1231 		/*
1232 		 * Check if there are active ports/threads. If there
1233 		 * are any, we will fail, else we will succeed (there
1234 		 * should not be much to clean up)
1235 		 */
1236 		mutex_enter(&fcp_global_mutex);
1237 		FCP_DTRACE(fcp_logq, "fcp",
1238 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1239 		    (void *) fcp_port_head);
1240 
1241 		if (fcp_port_head == NULL) {
1242 			ddi_remove_minor_node(fcp_global_dip, NULL);
1243 			fcp_global_dip = NULL;
1244 			mutex_exit(&fcp_global_mutex);
1245 		} else {
1246 			mutex_exit(&fcp_global_mutex);
1247 			res = DDI_FAILURE;
1248 		}
1249 	}
1250 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1251 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1252 
1253 	return (res);
1254 }
1255 
1256 
1257 /* ARGSUSED */
1258 static int
1259 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1260 {
1261 	if (otype != OTYP_CHR) {
1262 		return (EINVAL);
1263 	}
1264 
1265 	/*
1266 	 * Allow only root to talk.
1267 	 */
1268 	if (drv_priv(credp)) {
1269 		return (EPERM);
1270 	}
1271 
1272 	mutex_enter(&fcp_global_mutex);
1273 	if (fcp_oflag & FCP_EXCL) {
1274 		mutex_exit(&fcp_global_mutex);
1275 		return (EBUSY);
1276 	}
1277 
1278 	if (flag & FEXCL) {
1279 		if (fcp_oflag & FCP_OPEN) {
1280 			mutex_exit(&fcp_global_mutex);
1281 			return (EBUSY);
1282 		}
1283 		fcp_oflag |= FCP_EXCL;
1284 	}
1285 	fcp_oflag |= FCP_OPEN;
1286 	mutex_exit(&fcp_global_mutex);
1287 
1288 	return (0);
1289 }
1290 
1291 
1292 /* ARGSUSED */
1293 static int
1294 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1295 {
1296 	if (otype != OTYP_CHR) {
1297 		return (EINVAL);
1298 	}
1299 
1300 	mutex_enter(&fcp_global_mutex);
1301 	if (!(fcp_oflag & FCP_OPEN)) {
1302 		mutex_exit(&fcp_global_mutex);
1303 		return (ENODEV);
1304 	}
1305 	fcp_oflag = FCP_IDLE;
1306 	mutex_exit(&fcp_global_mutex);
1307 
1308 	return (0);
1309 }
1310 
1311 
1312 /*
1313  * fcp_ioctl
1314  *	Entry point for the FCP ioctls
1315  *
1316  * Input:
1317  *	See ioctl(9E)
1318  *
1319  * Output:
1320  *	See ioctl(9E)
1321  *
1322  * Returns:
1323  *	See ioctl(9E)
1324  *
1325  * Context:
1326  *	Kernel context.
1327  */
1328 /* ARGSUSED */
1329 static int
1330 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1331     int *rval)
1332 {
1333 	int			ret = 0;
1334 
1335 	mutex_enter(&fcp_global_mutex);
1336 	if (!(fcp_oflag & FCP_OPEN)) {
1337 		mutex_exit(&fcp_global_mutex);
1338 		return (ENXIO);
1339 	}
1340 	mutex_exit(&fcp_global_mutex);
1341 
1342 	switch (cmd) {
1343 	case FCP_TGT_INQUIRY:
1344 	case FCP_TGT_CREATE:
1345 	case FCP_TGT_DELETE:
1346 		ret = fcp_setup_device_data_ioctl(cmd,
1347 		    (struct fcp_ioctl *)data, mode, rval);
1348 		break;
1349 
1350 	case FCP_TGT_SEND_SCSI:
1351 		mutex_enter(&fcp_ioctl_mutex);
1352 		ret = fcp_setup_scsi_ioctl(
1353 		    (struct fcp_scsi_cmd *)data, mode, rval);
1354 		mutex_exit(&fcp_ioctl_mutex);
1355 		break;
1356 
1357 	case FCP_STATE_COUNT:
1358 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1359 		    mode, rval);
1360 		break;
1361 	case FCP_GET_TARGET_MAPPINGS:
1362 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1363 		    mode, rval);
1364 		break;
1365 	default:
1366 		fcp_log(CE_WARN, NULL,
1367 		    "!Invalid ioctl opcode = 0x%x", cmd);
1368 		ret	= EINVAL;
1369 	}
1370 
1371 	return (ret);
1372 }
1373 
1374 
1375 /*
1376  * fcp_setup_device_data_ioctl
1377  *	Setup handler for the "device data" style of
1378  *	ioctl for FCP.	See "fcp_util.h" for data structure
1379  *	definition.
1380  *
1381  * Input:
1382  *	cmd	= FCP ioctl command
1383  *	data	= ioctl data
1384  *	mode	= See ioctl(9E)
1385  *
1386  * Output:
1387  *	data	= ioctl data
1388  *	rval	= return value - see ioctl(9E)
1389  *
1390  * Returns:
1391  *	See ioctl(9E)
1392  *
1393  * Context:
1394  *	Kernel context.
1395  */
1396 /* ARGSUSED */
1397 static int
1398 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1399     int *rval)
1400 {
1401 	struct fcp_port	*pptr;
1402 	struct	device_data	*dev_data;
1403 	uint32_t		link_cnt;
1404 	la_wwn_t		*wwn_ptr = NULL;
1405 	struct fcp_tgt		*ptgt = NULL;
1406 	struct fcp_lun		*plun = NULL;
1407 	int			i, error;
1408 	struct fcp_ioctl	fioctl;
1409 
1410 #ifdef	_MULTI_DATAMODEL
1411 	switch (ddi_model_convert_from(mode & FMODELS)) {
1412 	case DDI_MODEL_ILP32: {
1413 		struct fcp32_ioctl f32_ioctl;
1414 
1415 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1416 		    sizeof (struct fcp32_ioctl), mode)) {
1417 			return (EFAULT);
1418 		}
1419 		fioctl.fp_minor = f32_ioctl.fp_minor;
1420 		fioctl.listlen = f32_ioctl.listlen;
1421 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1422 		break;
1423 	}
1424 	case DDI_MODEL_NONE:
1425 		if (ddi_copyin((void *)data, (void *)&fioctl,
1426 		    sizeof (struct fcp_ioctl), mode)) {
1427 			return (EFAULT);
1428 		}
1429 		break;
1430 	}
1431 
1432 #else	/* _MULTI_DATAMODEL */
1433 	if (ddi_copyin((void *)data, (void *)&fioctl,
1434 	    sizeof (struct fcp_ioctl), mode)) {
1435 		return (EFAULT);
1436 	}
1437 #endif	/* _MULTI_DATAMODEL */
1438 
1439 	/*
1440 	 * Right now we can assume that the minor number matches with
1441 	 * this instance of fp. If this changes we will need to
1442 	 * revisit this logic.
1443 	 */
1444 	mutex_enter(&fcp_global_mutex);
1445 	pptr = fcp_port_head;
1446 	while (pptr) {
1447 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1448 			break;
1449 		} else {
1450 			pptr = pptr->port_next;
1451 		}
1452 	}
1453 	mutex_exit(&fcp_global_mutex);
1454 	if (pptr == NULL) {
1455 		return (ENXIO);
1456 	}
1457 	mutex_enter(&pptr->port_mutex);
1458 
1459 
1460 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1461 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1462 		mutex_exit(&pptr->port_mutex);
1463 		return (ENOMEM);
1464 	}
1465 
1466 	if (ddi_copyin(fioctl.list, dev_data,
1467 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1468 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1469 		mutex_exit(&pptr->port_mutex);
1470 		return (EFAULT);
1471 	}
1472 	link_cnt = pptr->port_link_cnt;
1473 
1474 	if (cmd == FCP_TGT_INQUIRY) {
1475 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1476 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1477 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1478 			/* This ioctl is requesting INQ info of local HBA */
1479 			mutex_exit(&pptr->port_mutex);
1480 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1481 			dev_data[0].dev_status = 0;
1482 			if (ddi_copyout(dev_data, fioctl.list,
1483 			    (sizeof (struct device_data)) * fioctl.listlen,
1484 			    mode)) {
1485 				kmem_free(dev_data,
1486 				    sizeof (*dev_data) * fioctl.listlen);
1487 				return (EFAULT);
1488 			}
1489 			kmem_free(dev_data,
1490 			    sizeof (*dev_data) * fioctl.listlen);
1491 #ifdef	_MULTI_DATAMODEL
1492 			switch (ddi_model_convert_from(mode & FMODELS)) {
1493 			case DDI_MODEL_ILP32: {
1494 				struct fcp32_ioctl f32_ioctl;
1495 				f32_ioctl.fp_minor = fioctl.fp_minor;
1496 				f32_ioctl.listlen = fioctl.listlen;
1497 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1498 				if (ddi_copyout((void *)&f32_ioctl,
1499 				    (void *)data,
1500 				    sizeof (struct fcp32_ioctl), mode)) {
1501 					return (EFAULT);
1502 				}
1503 				break;
1504 			}
1505 			case DDI_MODEL_NONE:
1506 				if (ddi_copyout((void *)&fioctl, (void *)data,
1507 				    sizeof (struct fcp_ioctl), mode)) {
1508 					return (EFAULT);
1509 				}
1510 				break;
1511 			}
1512 #else	/* _MULTI_DATAMODEL */
1513 			if (ddi_copyout((void *)&fioctl, (void *)data,
1514 			    sizeof (struct fcp_ioctl), mode)) {
1515 				return (EFAULT);
1516 			}
1517 #endif	/* _MULTI_DATAMODEL */
1518 			return (0);
1519 		}
1520 	}
1521 
1522 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1523 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1524 		mutex_exit(&pptr->port_mutex);
1525 		return (ENXIO);
1526 	}
1527 
1528 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1529 	    i++) {
1530 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1531 
1532 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1533 
1534 
1535 		dev_data[i].dev_status = ENXIO;
1536 
1537 		if ((ptgt = fcp_lookup_target(pptr,
1538 		    (uchar_t *)wwn_ptr)) == NULL) {
1539 			mutex_exit(&pptr->port_mutex);
1540 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1541 			    wwn_ptr, &error, 0) == NULL) {
1542 				dev_data[i].dev_status = ENODEV;
1543 				mutex_enter(&pptr->port_mutex);
1544 				continue;
1545 			} else {
1546 
1547 				dev_data[i].dev_status = EAGAIN;
1548 
1549 				mutex_enter(&pptr->port_mutex);
1550 				continue;
1551 			}
1552 		} else {
1553 			mutex_enter(&ptgt->tgt_mutex);
1554 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1555 			    FCP_TGT_BUSY)) {
1556 				dev_data[i].dev_status = EAGAIN;
1557 				mutex_exit(&ptgt->tgt_mutex);
1558 				continue;
1559 			}
1560 
1561 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1562 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1563 					dev_data[i].dev_status = ENOTSUP;
1564 				} else {
1565 					dev_data[i].dev_status = ENXIO;
1566 				}
1567 				mutex_exit(&ptgt->tgt_mutex);
1568 				continue;
1569 			}
1570 
1571 			switch (cmd) {
1572 			case FCP_TGT_INQUIRY:
1573 				/*
1574 				 * The reason we give device type of
1575 				 * lun 0 only even though in some
1576 				 * cases(like maxstrat) lun 0 device
1577 				 * type may be 0x3f(invalid) is that
1578 				 * for bridge boxes target will appear
1579 				 * as luns and the first lun could be
1580 				 * a device that utility may not care
1581 				 * about (like a tape device).
1582 				 */
1583 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1584 				dev_data[i].dev_status = 0;
1585 				mutex_exit(&ptgt->tgt_mutex);
1586 
1587 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1588 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1589 				} else {
1590 					dev_data[i].dev0_type = plun->lun_type;
1591 				}
1592 				mutex_enter(&ptgt->tgt_mutex);
1593 				break;
1594 
1595 			case FCP_TGT_CREATE:
1596 				mutex_exit(&ptgt->tgt_mutex);
1597 				mutex_exit(&pptr->port_mutex);
1598 
1599 				/*
1600 				 * serialize state change call backs.
1601 				 * only one call back will be handled
1602 				 * at a time.
1603 				 */
1604 				mutex_enter(&fcp_global_mutex);
1605 				if (fcp_oflag & FCP_BUSY) {
1606 					mutex_exit(&fcp_global_mutex);
1607 					if (dev_data) {
1608 						kmem_free(dev_data,
1609 						    sizeof (*dev_data) *
1610 						    fioctl.listlen);
1611 					}
1612 					return (EBUSY);
1613 				}
1614 				fcp_oflag |= FCP_BUSY;
1615 				mutex_exit(&fcp_global_mutex);
1616 
1617 				dev_data[i].dev_status =
1618 				    fcp_create_on_demand(pptr,
1619 				    wwn_ptr->raw_wwn);
1620 
				if (dev_data[i].dev_status != 0) {
					char	buf[25];
					int	j;

					for (j = 0; j < FC_WWN_SIZE; j++) {
						(void) sprintf(&buf[j << 1],
						    "%02x",
						    wwn_ptr->raw_wwn[j]);
					}
1629 
1630 					fcp_log(CE_WARN, pptr->port_dip,
1631 					    "!Failed to create nodes for"
1632 					    " pwwn=%s; error=%x", buf,
1633 					    dev_data[i].dev_status);
1634 				}
1635 
				/* allow state change callbacks again */
1637 				mutex_enter(&fcp_global_mutex);
1638 				fcp_oflag &= ~FCP_BUSY;
1639 				mutex_exit(&fcp_global_mutex);
1640 
1641 				mutex_enter(&pptr->port_mutex);
1642 				mutex_enter(&ptgt->tgt_mutex);
1643 
1644 				break;
1645 
1646 			case FCP_TGT_DELETE:
1647 				break;
1648 
1649 			default:
1650 				fcp_log(CE_WARN, pptr->port_dip,
1651 				    "!Invalid device data ioctl "
1652 				    "opcode = 0x%x", cmd);
1653 			}
1654 			mutex_exit(&ptgt->tgt_mutex);
1655 		}
1656 	}
1657 	mutex_exit(&pptr->port_mutex);
1658 
1659 	if (ddi_copyout(dev_data, fioctl.list,
1660 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1661 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1662 		return (EFAULT);
1663 	}
1664 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1665 
1666 #ifdef	_MULTI_DATAMODEL
1667 	switch (ddi_model_convert_from(mode & FMODELS)) {
1668 	case DDI_MODEL_ILP32: {
1669 		struct fcp32_ioctl f32_ioctl;
1670 
1671 		f32_ioctl.fp_minor = fioctl.fp_minor;
1672 		f32_ioctl.listlen = fioctl.listlen;
1673 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1674 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1675 		    sizeof (struct fcp32_ioctl), mode)) {
1676 			return (EFAULT);
1677 		}
1678 		break;
1679 	}
1680 	case DDI_MODEL_NONE:
1681 		if (ddi_copyout((void *)&fioctl, (void *)data,
1682 		    sizeof (struct fcp_ioctl), mode)) {
1683 			return (EFAULT);
1684 		}
1685 		break;
1686 	}
1687 #else	/* _MULTI_DATAMODEL */
1688 
1689 	if (ddi_copyout((void *)&fioctl, (void *)data,
1690 	    sizeof (struct fcp_ioctl), mode)) {
1691 		return (EFAULT);
1692 	}
1693 #endif	/* _MULTI_DATAMODEL */
1694 
1695 	return (0);
1696 }
1697 
1698 /*
1699  * Fetch the target mappings (path, etc.) for all LUNs
1700  * on this port.
1701  */
1702 /* ARGSUSED */
1703 static int
1704 fcp_get_target_mappings(struct fcp_ioctl *data,
1705     int mode, int *rval)
1706 {
1707 	struct fcp_port	    *pptr;
1708 	fc_hba_target_mappings_t    *mappings;
1709 	fc_hba_mapping_entry_t	    *map;
1710 	struct fcp_tgt	    *ptgt = NULL;
1711 	struct fcp_lun	    *plun = NULL;
1712 	int			    i, mapIndex, mappingSize;
1713 	int			    listlen;
1714 	struct fcp_ioctl	    fioctl;
1715 	char			    *path;
1716 	fcp_ent_addr_t		    sam_lun_addr;
1717 
1718 #ifdef	_MULTI_DATAMODEL
1719 	switch (ddi_model_convert_from(mode & FMODELS)) {
1720 	case DDI_MODEL_ILP32: {
1721 		struct fcp32_ioctl f32_ioctl;
1722 
1723 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1724 		    sizeof (struct fcp32_ioctl), mode)) {
1725 			return (EFAULT);
1726 		}
1727 		fioctl.fp_minor = f32_ioctl.fp_minor;
1728 		fioctl.listlen = f32_ioctl.listlen;
1729 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1730 		break;
1731 	}
1732 	case DDI_MODEL_NONE:
1733 		if (ddi_copyin((void *)data, (void *)&fioctl,
1734 		    sizeof (struct fcp_ioctl), mode)) {
1735 			return (EFAULT);
1736 		}
1737 		break;
1738 	}
1739 
1740 #else	/* _MULTI_DATAMODEL */
1741 	if (ddi_copyin((void *)data, (void *)&fioctl,
1742 	    sizeof (struct fcp_ioctl), mode)) {
1743 		return (EFAULT);
1744 	}
1745 #endif	/* _MULTI_DATAMODEL */
1746 
1747 	/*
1748 	 * Right now we can assume that the minor number matches with
1749 	 * this instance of fp. If this changes we will need to
1750 	 * revisit this logic.
1751 	 */
1752 	mutex_enter(&fcp_global_mutex);
1753 	pptr = fcp_port_head;
1754 	while (pptr) {
1755 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1756 			break;
1757 		} else {
1758 			pptr = pptr->port_next;
1759 		}
1760 	}
1761 	mutex_exit(&fcp_global_mutex);
1762 	if (pptr == NULL) {
1763 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1764 		    fioctl.fp_minor);
1765 		return (ENXIO);
1766 	}
1767 
1768 
1769 	/* We use listlen to show the total buffer size */
1770 	mappingSize = fioctl.listlen;
1771 
1772 	/* Now calculate how many mapping entries will fit */
1773 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1774 	    - sizeof (fc_hba_target_mappings_t);
1775 	if (listlen <= 0) {
1776 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1777 		return (ENXIO);
1778 	}
1779 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
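	/*
	 * A sketch of the arithmetic above, assuming fc_hba_target_mappings_t
	 * embeds a single fc_hba_mapping_entry_t: the usable space is the
	 * caller's buffer minus the fixed header plus that one embedded
	 * entry, and dividing by the entry size yields how many mapping
	 * entries fit.
	 */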
1780 
1781 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1782 		return (ENOMEM);
1783 	}
1784 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1785 
1786 	/* Now get to work */
1787 	mapIndex = 0;
1788 
1789 	mutex_enter(&pptr->port_mutex);
1790 	/* Loop through all targets on this port */
1791 	for (i = 0; i < FCP_NUM_HASH; i++) {
1792 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1793 		    ptgt = ptgt->tgt_next) {
1794 
1795 
1796 			/* Loop through all LUNs on this target */
1797 			for (plun = ptgt->tgt_lun; plun != NULL;
1798 			    plun = plun->lun_next) {
1799 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1800 					continue;
1801 				}
1802 
1803 				path = fcp_get_lun_path(plun);
1804 				if (path == NULL) {
1805 					continue;
1806 				}
1807 
1808 				if (mapIndex >= listlen) {
					mapIndex++;
1810 					kmem_free(path, MAXPATHLEN);
1811 					continue;
1812 				}
1813 				map = &mappings->entries[mapIndex++];
1814 				bcopy(path, map->targetDriver,
1815 				    sizeof (map->targetDriver));
1816 				map->d_id = ptgt->tgt_d_id;
1817 				map->busNumber = 0;
1818 				map->targetNumber = ptgt->tgt_d_id;
1819 				map->osLUN = plun->lun_num;
1820 
1821 				/*
1822 				 * We had swapped lun when we stored it in
1823 				 * lun_addr. We need to swap it back before
1824 				 * returning it to user land
1825 				 */
1826 
1827 				sam_lun_addr.ent_addr_0 =
1828 				    BE_16(plun->lun_addr.ent_addr_0);
1829 				sam_lun_addr.ent_addr_1 =
1830 				    BE_16(plun->lun_addr.ent_addr_1);
1831 				sam_lun_addr.ent_addr_2 =
1832 				    BE_16(plun->lun_addr.ent_addr_2);
1833 				sam_lun_addr.ent_addr_3 =
1834 				    BE_16(plun->lun_addr.ent_addr_3);
1835 
1836 				bcopy(&sam_lun_addr, &map->samLUN,
1837 				    FCP_LUN_SIZE);
1838 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1839 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1840 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1841 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1842 
1843 				if (plun->lun_guid) {
1844 
1845 					/* convert ascii wwn to bytes */
1846 					fcp_ascii_to_wwn(plun->lun_guid,
1847 					    map->guid, sizeof (map->guid));
1848 
1849 					if ((sizeof (map->guid)) <
1850 					    plun->lun_guid_size / 2) {
1851 						cmn_err(CE_WARN,
1852 						    "fcp_get_target_mappings:"
1853 						    "guid copy space "
1854 						    "insufficient."
1855 						    "Copy Truncation - "
1856 						    "available %d; need %d",
1857 						    (int)sizeof (map->guid),
1858 						    (int)
1859 						    plun->lun_guid_size / 2);
1860 					}
1861 				}
1862 				kmem_free(path, MAXPATHLEN);
1863 			}
1864 		}
1865 	}
1866 	mutex_exit(&pptr->port_mutex);
1867 	mappings->numLuns = mapIndex;
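	/*
	 * Note that mapIndex keeps counting in the loop above even after the
	 * caller's buffer is full, so numLuns reports every online LUN that
	 * was found; a caller can compare numLuns against the space it
	 * supplied to detect truncation.
	 */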
1868 
1869 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1870 		kmem_free(mappings, mappingSize);
1871 		return (EFAULT);
1872 	}
1873 	kmem_free(mappings, mappingSize);
1874 
1875 #ifdef	_MULTI_DATAMODEL
1876 	switch (ddi_model_convert_from(mode & FMODELS)) {
1877 	case DDI_MODEL_ILP32: {
1878 		struct fcp32_ioctl f32_ioctl;
1879 
1880 		f32_ioctl.fp_minor = fioctl.fp_minor;
1881 		f32_ioctl.listlen = fioctl.listlen;
1882 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1883 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1884 		    sizeof (struct fcp32_ioctl), mode)) {
1885 			return (EFAULT);
1886 		}
1887 		break;
1888 	}
1889 	case DDI_MODEL_NONE:
1890 		if (ddi_copyout((void *)&fioctl, (void *)data,
1891 		    sizeof (struct fcp_ioctl), mode)) {
1892 			return (EFAULT);
1893 		}
1894 		break;
1895 	}
1896 #else	/* _MULTI_DATAMODEL */
1897 
1898 	if (ddi_copyout((void *)&fioctl, (void *)data,
1899 	    sizeof (struct fcp_ioctl), mode)) {
1900 		return (EFAULT);
1901 	}
1902 #endif	/* _MULTI_DATAMODEL */
1903 
1904 	return (0);
1905 }
1906 
1907 /*
1908  * fcp_setup_scsi_ioctl
1909  *	Setup handler for the "scsi passthru" style of
1910  *	ioctl for FCP.	See "fcp_util.h" for data structure
1911  *	definition.
1912  *
1913  * Input:
1914  *	u_fscsi	= ioctl data (user address space)
1915  *	mode	= See ioctl(9E)
1916  *
1917  * Output:
1918  *	u_fscsi	= ioctl data (user address space)
1919  *	rval	= return value - see ioctl(9E)
1920  *
1921  * Returns:
1922  *	0	= OK
1923  *	EAGAIN	= See errno.h
1924  *	EBUSY	= See errno.h
1925  *	EFAULT	= See errno.h
1926  *	EINTR	= See errno.h
1927  *	EINVAL	= See errno.h
1928  *	EIO	= See errno.h
1929  *	ENOMEM	= See errno.h
1930  *	ENXIO	= See errno.h
1931  *
1932  * Context:
1933  *	Kernel context.
1934  */
1935 /* ARGSUSED */
1936 static int
1937 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1938     int mode, int *rval)
1939 {
1940 	int			ret		= 0;
1941 	int			temp_ret;
1942 	caddr_t			k_cdbbufaddr	= NULL;
1943 	caddr_t			k_bufaddr	= NULL;
1944 	caddr_t			k_rqbufaddr	= NULL;
1945 	caddr_t			u_cdbbufaddr;
1946 	caddr_t			u_bufaddr;
1947 	caddr_t			u_rqbufaddr;
1948 	struct fcp_scsi_cmd	k_fscsi;
1949 
1950 	/*
1951 	 * Get fcp_scsi_cmd array element from user address space
1952 	 */
1953 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1954 	    != 0) {
1955 		return (ret);
1956 	}
1957 
1958 
1959 	/*
1960 	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when kmem_flags
	 * are set and a zero buffer length is passed.
1963 	 */
1964 	if ((k_fscsi.scsi_cdblen <= 0) ||
1965 	    (k_fscsi.scsi_buflen <= 0) ||
1966 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
1967 	    (k_fscsi.scsi_rqlen <= 0) ||
1968 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
1969 		return (EINVAL);
1970 	}
1971 
1972 	/*
1973 	 * Allocate data for fcp_scsi_cmd pointer fields
1974 	 */
1975 	if (ret == 0) {
1976 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
1977 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
1978 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
1979 
1980 		if (k_cdbbufaddr == NULL ||
1981 		    k_bufaddr	 == NULL ||
1982 		    k_rqbufaddr	 == NULL) {
1983 			ret = ENOMEM;
1984 		}
1985 	}
1986 
1987 	/*
1988 	 * Get fcp_scsi_cmd pointer fields from user
1989 	 * address space
1990 	 */
1991 	if (ret == 0) {
1992 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
1993 		u_bufaddr    = k_fscsi.scsi_bufaddr;
1994 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
1995 
1996 		if (ddi_copyin(u_cdbbufaddr,
1997 		    k_cdbbufaddr,
1998 		    k_fscsi.scsi_cdblen,
1999 		    mode)) {
2000 			ret = EFAULT;
2001 		} else if (ddi_copyin(u_bufaddr,
2002 		    k_bufaddr,
2003 		    k_fscsi.scsi_buflen,
2004 		    mode)) {
2005 			ret = EFAULT;
2006 		} else if (ddi_copyin(u_rqbufaddr,
2007 		    k_rqbufaddr,
2008 		    k_fscsi.scsi_rqlen,
2009 		    mode)) {
2010 			ret = EFAULT;
2011 		}
2012 	}
2013 
2014 	/*
2015 	 * Send scsi command (blocking)
2016 	 */
2017 	if (ret == 0) {
2018 		/*
2019 		 * Prior to sending the scsi command, the
2020 		 * fcp_scsi_cmd data structure must contain kernel,
2021 		 * not user, addresses.
2022 		 */
2023 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2024 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2025 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2026 
2027 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2028 
2029 		/*
2030 		 * After sending the scsi command, the
2031 		 * fcp_scsi_cmd data structure must contain user,
2032 		 * not kernel, addresses.
2033 		 */
2034 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2035 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2036 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2037 	}
2038 
2039 	/*
2040 	 * Put fcp_scsi_cmd pointer fields to user address space
2041 	 */
2042 	if (ret == 0) {
2043 		if (ddi_copyout(k_cdbbufaddr,
2044 		    u_cdbbufaddr,
2045 		    k_fscsi.scsi_cdblen,
2046 		    mode)) {
2047 			ret = EFAULT;
2048 		} else if (ddi_copyout(k_bufaddr,
2049 		    u_bufaddr,
2050 		    k_fscsi.scsi_buflen,
2051 		    mode)) {
2052 			ret = EFAULT;
2053 		} else if (ddi_copyout(k_rqbufaddr,
2054 		    u_rqbufaddr,
2055 		    k_fscsi.scsi_rqlen,
2056 		    mode)) {
2057 			ret = EFAULT;
2058 		}
2059 	}
2060 
2061 	/*
2062 	 * Free data for fcp_scsi_cmd pointer fields
2063 	 */
2064 	if (k_cdbbufaddr != NULL) {
2065 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2066 	}
2067 	if (k_bufaddr != NULL) {
2068 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2069 	}
2070 	if (k_rqbufaddr != NULL) {
2071 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2072 	}
2073 
2074 	/*
2075 	 * Put fcp_scsi_cmd array element to user address space
2076 	 */
2077 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2078 	if (temp_ret != 0) {
2079 		ret = temp_ret;
2080 	}
2081 
2082 	/*
2083 	 * Return status
2084 	 */
2085 	return (ret);
2086 }
2087 
2088 
2089 /*
2090  * fcp_copyin_scsi_cmd
2091  *	Copy in fcp_scsi_cmd data structure from user address space.
2092  *	The data may be in 32 bit or 64 bit modes.
2093  *
2094  * Input:
2095  *	base_addr	= from address (user address space)
2096  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2097  *
2098  * Output:
2099  *	fscsi		= to address (kernel address space)
2100  *
2101  * Returns:
2102  *	0	= OK
2103  *	EFAULT	= Error
2104  *
2105  * Context:
2106  *	Kernel context.
2107  */
2108 static int
2109 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2110 {
2111 #ifdef	_MULTI_DATAMODEL
2112 	struct fcp32_scsi_cmd	f32scsi;
2113 
2114 	switch (ddi_model_convert_from(mode & FMODELS)) {
2115 	case DDI_MODEL_ILP32:
2116 		/*
2117 		 * Copy data from user address space
2118 		 */
2119 		if (ddi_copyin((void *)base_addr,
2120 		    &f32scsi,
2121 		    sizeof (struct fcp32_scsi_cmd),
2122 		    mode)) {
2123 			return (EFAULT);
2124 		}
2125 		/*
2126 		 * Convert from 32 bit to 64 bit
2127 		 */
2128 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2129 		break;
2130 	case DDI_MODEL_NONE:
2131 		/*
2132 		 * Copy data from user address space
2133 		 */
2134 		if (ddi_copyin((void *)base_addr,
2135 		    fscsi,
2136 		    sizeof (struct fcp_scsi_cmd),
2137 		    mode)) {
2138 			return (EFAULT);
2139 		}
2140 		break;
2141 	}
2142 #else	/* _MULTI_DATAMODEL */
2143 	/*
2144 	 * Copy data from user address space
2145 	 */
2146 	if (ddi_copyin((void *)base_addr,
2147 	    fscsi,
2148 	    sizeof (struct fcp_scsi_cmd),
2149 	    mode)) {
2150 		return (EFAULT);
2151 	}
2152 #endif	/* _MULTI_DATAMODEL */
2153 
2154 	return (0);
2155 }
2156 
2157 
2158 /*
2159  * fcp_copyout_scsi_cmd
2160  *	Copy out fcp_scsi_cmd data structure to user address space.
2161  *	The data may be in 32 bit or 64 bit modes.
2162  *
2163  * Input:
 *	fscsi		= from address (kernel address space)
 *	mode		= See ioctl(9E) and ddi_copyout(9F)
 *
 * Output:
 *	base_addr	= to address (user address space)
2169  *
2170  * Returns:
2171  *	0	= OK
2172  *	EFAULT	= Error
2173  *
2174  * Context:
2175  *	Kernel context.
2176  */
2177 static int
2178 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2179 {
2180 #ifdef	_MULTI_DATAMODEL
2181 	struct fcp32_scsi_cmd	f32scsi;
2182 
2183 	switch (ddi_model_convert_from(mode & FMODELS)) {
2184 	case DDI_MODEL_ILP32:
2185 		/*
2186 		 * Convert from 64 bit to 32 bit
2187 		 */
2188 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2189 		/*
2190 		 * Copy data to user address space
2191 		 */
2192 		if (ddi_copyout(&f32scsi,
2193 		    (void *)base_addr,
2194 		    sizeof (struct fcp32_scsi_cmd),
2195 		    mode)) {
2196 			return (EFAULT);
2197 		}
2198 		break;
2199 	case DDI_MODEL_NONE:
2200 		/*
2201 		 * Copy data to user address space
2202 		 */
2203 		if (ddi_copyout(fscsi,
2204 		    (void *)base_addr,
2205 		    sizeof (struct fcp_scsi_cmd),
2206 		    mode)) {
2207 			return (EFAULT);
2208 		}
2209 		break;
2210 	}
2211 #else	/* _MULTI_DATAMODEL */
2212 	/*
2213 	 * Copy data to user address space
2214 	 */
2215 	if (ddi_copyout(fscsi,
2216 	    (void *)base_addr,
2217 	    sizeof (struct fcp_scsi_cmd),
2218 	    mode)) {
2219 		return (EFAULT);
2220 	}
2221 #endif	/* _MULTI_DATAMODEL */
2222 
2223 	return (0);
2224 }
2225 
2226 
2227 /*
2228  * fcp_send_scsi_ioctl
2229  *	Sends the SCSI command in blocking mode.
2230  *
2231  * Input:
2232  *	fscsi		= SCSI command data structure
2233  *
2234  * Output:
2235  *	fscsi		= SCSI command data structure
2236  *
2237  * Returns:
2238  *	0	= OK
2239  *	EAGAIN	= See errno.h
2240  *	EBUSY	= See errno.h
2241  *	EINTR	= See errno.h
2242  *	EINVAL	= See errno.h
2243  *	EIO	= See errno.h
2244  *	ENOMEM	= See errno.h
2245  *	ENXIO	= See errno.h
2246  *
2247  * Context:
2248  *	Kernel context.
2249  */
2250 static int
2251 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2252 {
2253 	struct fcp_lun	*plun		= NULL;
2254 	struct fcp_port	*pptr		= NULL;
2255 	struct fcp_tgt	*ptgt		= NULL;
2256 	fc_packet_t		*fpkt		= NULL;
2257 	struct fcp_ipkt	*icmd		= NULL;
2258 	int			target_created	= FALSE;
2259 	fc_frame_hdr_t		*hp;
2260 	struct fcp_cmd		fcp_cmd;
2261 	struct fcp_cmd		*fcmd;
2262 	union scsi_cdb		*scsi_cdb;
2263 	la_wwn_t		*wwn_ptr;
2264 	int			nodma;
2265 	struct fcp_rsp		*rsp;
2266 	struct fcp_rsp_info	*rsp_info;
2267 	caddr_t			rsp_sense;
2268 	int			buf_len;
2269 	int			info_len;
2270 	int			sense_len;
2271 	struct scsi_extended_sense	*sense_to = NULL;
2272 	timeout_id_t		tid;
2273 	uint8_t			reconfig_lun = FALSE;
2274 	uint8_t			reconfig_pending = FALSE;
2275 	uint8_t			scsi_cmd;
2276 	int			rsp_len;
2277 	int			cmd_index;
2278 	int			fc_status;
2279 	int			pkt_state;
2280 	int			pkt_action;
2281 	int			pkt_reason;
2282 	int			ret, xport_retval = ~FC_SUCCESS;
2283 	int			lcount;
2284 	int			tcount;
2285 	int			reconfig_status;
2286 	int			port_busy = FALSE;
2287 	uchar_t			*lun_string;
2288 
2289 	/*
2290 	 * Check valid SCSI command
2291 	 */
2292 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2293 	ret = EINVAL;
2294 	for (cmd_index = 0;
2295 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2296 	    ret != 0;
2297 	    cmd_index++) {
2298 		/*
2299 		 * First byte of CDB is the SCSI command
2300 		 */
2301 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2302 			ret = 0;
2303 		}
2304 	}
2305 
2306 	/*
2307 	 * Check inputs
2308 	 */
2309 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2310 		ret = EINVAL;
2311 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2312 		/* no larger than */
2313 		ret = EINVAL;
2314 	}
2315 
2316 
2317 	/*
2318 	 * Find FC port
2319 	 */
2320 	if (ret == 0) {
2321 		/*
2322 		 * Acquire global mutex
2323 		 */
2324 		mutex_enter(&fcp_global_mutex);
2325 
2326 		pptr = fcp_port_head;
2327 		while (pptr) {
2328 			if (pptr->port_instance ==
2329 			    (uint32_t)fscsi->scsi_fc_port_num) {
2330 				break;
2331 			} else {
2332 				pptr = pptr->port_next;
2333 			}
2334 		}
2335 
2336 		if (pptr == NULL) {
2337 			ret = ENXIO;
2338 		} else {
2339 			/*
2340 			 * fc_ulp_busy_port can raise power
2341 			 *  so, we must not hold any mutexes involved in PM
2342 			 */
2343 			mutex_exit(&fcp_global_mutex);
2344 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2345 		}
2346 
2347 		if (ret == 0) {
2348 
2349 			/* remember port is busy, so we will release later */
2350 			port_busy = TRUE;
2351 
2352 			/*
2353 			 * If there is a reconfiguration in progress, wait
2354 			 * for it to complete.
2355 			 */
2356 
2357 			fcp_reconfig_wait(pptr);
2358 
2359 			/* reacquire mutexes in order */
2360 			mutex_enter(&fcp_global_mutex);
2361 			mutex_enter(&pptr->port_mutex);
2362 
2363 			/*
2364 			 * Will port accept DMA?
2365 			 */
2366 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2367 			    ? 1 : 0;
2368 
2369 			/*
2370 			 * If init or offline, device not known
2371 			 *
2372 			 * If we are discovering (onlining), we can
2373 			 * NOT obviously provide reliable data about
2374 			 * devices until it is complete
2375 			 */
2376 			if (pptr->port_state &	  (FCP_STATE_INIT |
2377 			    FCP_STATE_OFFLINE)) {
2378 				ret = ENXIO;
2379 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2380 				ret = EBUSY;
2381 			} else {
2382 				/*
2383 				 * Find target from pwwn
2384 				 *
2385 				 * The wwn must be put into a local
2386 				 * variable to ensure alignment.
2387 				 */
2388 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2389 				ptgt = fcp_lookup_target(pptr,
2390 				    (uchar_t *)wwn_ptr);
2391 
2392 				/*
2393 				 * If target not found,
2394 				 */
2395 				if (ptgt == NULL) {
2396 					/*
2397 					 * Note: Still have global &
2398 					 * port mutexes
2399 					 */
2400 					mutex_exit(&pptr->port_mutex);
2401 					ptgt = fcp_port_create_tgt(pptr,
2402 					    wwn_ptr, &ret, &fc_status,
2403 					    &pkt_state, &pkt_action,
2404 					    &pkt_reason);
2405 					mutex_enter(&pptr->port_mutex);
2406 
2407 					fscsi->scsi_fc_status  = fc_status;
2408 					fscsi->scsi_pkt_state  =
2409 					    (uchar_t)pkt_state;
2410 					fscsi->scsi_pkt_reason = pkt_reason;
2411 					fscsi->scsi_pkt_action =
2412 					    (uchar_t)pkt_action;
2413 
2414 					if (ptgt != NULL) {
2415 						target_created = TRUE;
2416 					} else if (ret == 0) {
2417 						ret = ENOMEM;
2418 					}
2419 				}
2420 
2421 				if (ret == 0) {
2422 					/*
2423 					 * Acquire target
2424 					 */
2425 					mutex_enter(&ptgt->tgt_mutex);
2426 
2427 					/*
2428 					 * If target is mark or busy,
2429 					 * then target can not be used
2430 					 */
2431 					if (ptgt->tgt_state &
2432 					    (FCP_TGT_MARK |
2433 					    FCP_TGT_BUSY)) {
2434 						ret = EBUSY;
2435 					} else {
2436 						/*
2437 						 * Mark target as busy
2438 						 */
2439 						ptgt->tgt_state |=
2440 						    FCP_TGT_BUSY;
2441 					}
2442 
2443 					/*
2444 					 * Release target
2445 					 */
2446 					lcount = pptr->port_link_cnt;
2447 					tcount = ptgt->tgt_change_cnt;
2448 					mutex_exit(&ptgt->tgt_mutex);
2449 				}
2450 			}
2451 
2452 			/*
2453 			 * Release port
2454 			 */
2455 			mutex_exit(&pptr->port_mutex);
2456 		}
2457 
2458 		/*
2459 		 * Release global mutex
2460 		 */
2461 		mutex_exit(&fcp_global_mutex);
2462 	}
2463 
2464 	if (ret == 0) {
2465 		uint64_t belun = BE_64(fscsi->scsi_lun);
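
		/*
		 * Sketch of the SAM LUN layout assumed by the checks below:
		 * the top two bits of byte 0 are the addressing method, the
		 * remaining 14 bits of bytes 0-1 are the LUN number, and
		 * bytes 2-7 must be zero for the PD and LU methods FCP
		 * supports.  Hence (belun << 16) != 0 rejects a LUN with
		 * non-zero trailing bytes and (belun >> 48) & 0x3fff
		 * extracts the LUN number.
		 */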
2466 
2467 		/*
		 * If it's a target device, find the LUN from the pwwn.
		 * The wwn must be put into a local variable to ensure
		 * alignment.
2471 		 */
2472 		mutex_enter(&pptr->port_mutex);
2473 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2474 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2475 			/* this is not a target */
2476 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2477 			ret = ENXIO;
2478 		} else if ((belun << 16) != 0) {
2479 			/*
			 * Since fcp only supports the PD and LU addressing
			 * methods so far, the last 6 bytes of a valid LUN are
			 * expected to be filled with 00h.
2483 			 */
2484 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2485 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2486 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2487 			    (uint8_t)(belun >> 62), belun);
2488 			ret = ENXIO;
2489 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2490 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2491 			/*
2492 			 * This is a SCSI target, but no LUN at this
2493 			 * address.
2494 			 *
2495 			 * In the future, we may want to send this to
2496 			 * the target, and let it respond
2497 			 * appropriately
2498 			 */
2499 			ret = ENXIO;
2500 		}
2501 		mutex_exit(&pptr->port_mutex);
2502 	}
2503 
2504 	/*
2505 	 * Finished grabbing external resources
2506 	 * Allocate internal packet (icmd)
2507 	 */
2508 	if (ret == 0) {
2509 		/*
2510 		 * Calc rsp len assuming rsp info included
2511 		 */
2512 		rsp_len = sizeof (struct fcp_rsp) +
2513 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2514 
2515 		icmd = fcp_icmd_alloc(pptr, ptgt,
2516 		    sizeof (struct fcp_cmd),
2517 		    rsp_len,
2518 		    fscsi->scsi_buflen,
2519 		    nodma,
2520 		    lcount,			/* ipkt_link_cnt */
2521 		    tcount,			/* ipkt_change_cnt */
2522 		    0,				/* cause */
2523 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
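
		/*
		 * lcount and tcount were snapshotted under the port and
		 * target mutexes; FCP_TGT_STATE_CHANGED() below compares
		 * them against the live counters so the command can be
		 * abandoned if a link or target state change happened in
		 * the meantime.
		 */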
2524 
2525 		if (icmd == NULL) {
2526 			ret = ENOMEM;
2527 		} else {
2528 			/*
2529 			 * Setup internal packet as sema sync
2530 			 */
2531 			fcp_ipkt_sema_init(icmd);
2532 		}
2533 	}
2534 
2535 	if (ret == 0) {
2536 		/*
2537 		 * Init fpkt pointer for use.
2538 		 */
2539 
2540 		fpkt = icmd->ipkt_fpkt;
2541 
2542 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2543 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2544 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2545 
2546 		/*
2547 		 * Init fcmd pointer for use by SCSI command
2548 		 */
2549 
2550 		if (nodma) {
2551 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2552 		} else {
2553 			fcmd = &fcp_cmd;
2554 		}
2555 		bzero(fcmd, sizeof (struct fcp_cmd));
2556 		ptgt = plun->lun_tgt;
2557 
2558 		lun_string = (uchar_t *)&fscsi->scsi_lun;
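
		/*
		 * The caller's 8-byte SAM LUN is copied into the FCP entity
		 * address below as four 16-bit words, each stored big-endian.
		 */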
2559 
2560 		fcmd->fcp_ent_addr.ent_addr_0 =
2561 		    BE_16(*(uint16_t *)&(lun_string[0]));
2562 		fcmd->fcp_ent_addr.ent_addr_1 =
2563 		    BE_16(*(uint16_t *)&(lun_string[2]));
2564 		fcmd->fcp_ent_addr.ent_addr_2 =
2565 		    BE_16(*(uint16_t *)&(lun_string[4]));
2566 		fcmd->fcp_ent_addr.ent_addr_3 =
2567 		    BE_16(*(uint16_t *)&(lun_string[6]));
2568 
2569 		/*
2570 		 * Setup internal packet(icmd)
2571 		 */
2572 		icmd->ipkt_lun		= plun;
2573 		icmd->ipkt_restart	= 0;
2574 		icmd->ipkt_retries	= 0;
2575 		icmd->ipkt_opcode	= 0;
2576 
2577 		/*
2578 		 * Init the frame HEADER Pointer for use
2579 		 */
2580 		hp = &fpkt->pkt_cmd_fhdr;
2581 
2582 		hp->s_id	= pptr->port_id;
2583 		hp->d_id	= ptgt->tgt_d_id;
2584 		hp->r_ctl	= R_CTL_COMMAND;
2585 		hp->type	= FC_TYPE_SCSI_FCP;
2586 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2587 		hp->rsvd	= 0;
2588 		hp->seq_id	= 0;
2589 		hp->seq_cnt	= 0;
2590 		hp->ox_id	= 0xffff;
2591 		hp->rx_id	= 0xffff;
2592 		hp->ro		= 0;
2593 
2594 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2595 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2596 		fcmd->fcp_cntl.cntl_write_data	= 0;
2597 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2598 
2599 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2600 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2601 		    fscsi->scsi_cdblen);
2602 
2603 		if (!nodma) {
2604 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2605 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2606 		}
2607 
2608 		/*
2609 		 * Send SCSI command to FC transport
2610 		 */
2611 
2612 		if (ret == 0) {
2613 			mutex_enter(&ptgt->tgt_mutex);
2614 
2615 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2616 				mutex_exit(&ptgt->tgt_mutex);
2617 				fscsi->scsi_fc_status = xport_retval =
2618 				    fc_ulp_transport(pptr->port_fp_handle,
2619 				    fpkt);
2620 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2621 					ret = EIO;
2622 				}
2623 			} else {
2624 				mutex_exit(&ptgt->tgt_mutex);
2625 				ret = EBUSY;
2626 			}
2627 		}
2628 	}
2629 
2630 	/*
2631 	 * Wait for completion only if fc_ulp_transport was called and it
2632 	 * returned a success. This is the only time callback will happen.
2633 	 * Otherwise, there is no point in waiting
2634 	 */
2635 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2636 		ret = fcp_ipkt_sema_wait(icmd);
2637 	}
2638 
2639 	/*
2640 	 * Copy data to IOCTL data structures
2641 	 */
2642 	rsp = NULL;
2643 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2644 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2645 
2646 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2647 			fcp_log(CE_WARN, pptr->port_dip,
2648 			    "!SCSI command to d_id=0x%x lun=0x%x"
2649 			    " failed, Bad FCP response values:"
2650 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2651 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2652 			    ptgt->tgt_d_id, plun->lun_num,
2653 			    rsp->reserved_0, rsp->reserved_1,
2654 			    rsp->fcp_u.fcp_status.reserved_0,
2655 			    rsp->fcp_u.fcp_status.reserved_1,
2656 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2657 
2658 			ret = EIO;
2659 		}
2660 	}
2661 
2662 	if ((ret == 0) && (rsp != NULL)) {
2663 		/*
2664 		 * Calc response lengths
2665 		 */
2666 		sense_len = 0;
2667 		info_len = 0;
2668 
2669 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2670 			info_len = rsp->fcp_response_len;
2671 		}
2672 
2673 		rsp_info   = (struct fcp_rsp_info *)
2674 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2675 
2676 		/*
2677 		 * Get SCSI status
2678 		 */
2679 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2680 		/*
2681 		 * If a lun was just added or removed and the next command
2682 		 * comes through this interface, we need to capture the check
2683 		 * condition so we can discover the new topology.
2684 		 */
2685 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2686 		    rsp->fcp_u.fcp_status.sense_len_set) {
2687 			sense_len = rsp->fcp_sense_len;
2688 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2689 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2690 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2691 			    (FCP_SENSE_NO_LUN(sense_to))) {
2692 				reconfig_lun = TRUE;
2693 			}
2694 		}
2695 
2696 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2697 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2698 			if (reconfig_lun == FALSE) {
2699 				reconfig_status =
2700 				    fcp_is_reconfig_needed(ptgt, fpkt);
2701 			}
2702 
2703 			if ((reconfig_lun == TRUE) ||
2704 			    (reconfig_status == TRUE)) {
2705 				mutex_enter(&ptgt->tgt_mutex);
2706 				if (ptgt->tgt_tid == NULL) {
2707 					/*
2708 					 * Either we've been notified the
2709 					 * REPORT_LUN data has changed, or
2710 					 * we've determined on our own that
2711 					 * we're out of date.  Kick off
2712 					 * rediscovery.
2713 					 */
2714 					tid = timeout(fcp_reconfigure_luns,
2715 					    (caddr_t)ptgt, drv_usectohz(1));
2716 
2717 					ptgt->tgt_tid = tid;
2718 					ptgt->tgt_state |= FCP_TGT_BUSY;
2719 					ret = EBUSY;
2720 					reconfig_pending = TRUE;
2721 				}
2722 				mutex_exit(&ptgt->tgt_mutex);
2723 			}
2724 		}
2725 
2726 		/*
2727 		 * Calc residuals and buffer lengths
2728 		 */
2729 
2730 		if (ret == 0) {
2731 			buf_len = fscsi->scsi_buflen;
2732 			fscsi->scsi_bufresid	= 0;
2733 			if (rsp->fcp_u.fcp_status.resid_under) {
2734 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2735 					fscsi->scsi_bufresid = rsp->fcp_resid;
2736 				} else {
2737 					cmn_err(CE_WARN, "fcp: bad residue %x "
2738 					    "for txfer len %x", rsp->fcp_resid,
2739 					    fscsi->scsi_buflen);
2740 					fscsi->scsi_bufresid =
2741 					    fscsi->scsi_buflen;
2742 				}
2743 				buf_len -= fscsi->scsi_bufresid;
2744 			}
2745 			if (rsp->fcp_u.fcp_status.resid_over) {
2746 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2747 			}
2748 
2749 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2750 			if (fscsi->scsi_rqlen < sense_len) {
2751 				sense_len = fscsi->scsi_rqlen;
2752 			}
2753 
2754 			fscsi->scsi_fc_rspcode	= 0;
2755 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2756 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2757 			}
2758 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2759 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2760 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2761 
2762 			/*
2763 			 * Copy data and request sense
2764 			 *
2765 			 * Data must be copied by using the FCP_CP_IN macro.
2766 			 * This will ensure the proper byte order since the data
2767 			 * is being copied directly from the memory mapped
2768 			 * device register.
2769 			 *
2770 			 * The response (and request sense) will be in the
2771 			 * correct byte order.	No special copy is necessary.
2772 			 */
2773 
2774 			if (buf_len) {
2775 				FCP_CP_IN(fpkt->pkt_data,
2776 				    fscsi->scsi_bufaddr,
2777 				    fpkt->pkt_data_acc,
2778 				    buf_len);
2779 			}
2780 			bcopy((void *)rsp_sense,
2781 			    (void *)fscsi->scsi_rqbufaddr,
2782 			    sense_len);
2783 		}
2784 	}
2785 
2786 	/*
	 * Cleanup transport data structures if icmd was alloc-ed,
	 * so that cleanup happens in the same thread that alloc-ed icmd.
2789 	 */
2790 	if (icmd != NULL) {
2791 		fcp_ipkt_sema_cleanup(icmd);
2792 	}
2793 
2794 	/* restore pm busy/idle status */
2795 	if (port_busy) {
2796 		fc_ulp_idle_port(pptr->port_fp_handle);
2797 	}
2798 
2799 	/*
	 * Cleanup target.  If a reconfig is pending, don't clear the BUSY
	 * flag; it'll be cleared when the reconfig is complete.
2802 	 */
2803 	if ((ptgt != NULL) && !reconfig_pending) {
2804 		/*
		 * If the target was created, clear its busy flag.
2806 		 */
2807 		if (target_created) {
2808 			mutex_enter(&ptgt->tgt_mutex);
2809 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2810 			mutex_exit(&ptgt->tgt_mutex);
2811 		} else {
2812 			/*
2813 			 * De-mark target as busy
2814 			 */
2815 			mutex_enter(&ptgt->tgt_mutex);
2816 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2817 			mutex_exit(&ptgt->tgt_mutex);
2818 		}
2819 	}
2820 	return (ret);
2821 }
2822 
2823 
2824 static int
2825 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2826     fc_packet_t	*fpkt)
2827 {
2828 	uchar_t			*lun_string;
2829 	uint16_t		lun_num, i;
2830 	int			num_luns;
2831 	int			actual_luns;
2832 	int			num_masked_luns;
2833 	int			lun_buflen;
2834 	struct fcp_lun	*plun	= NULL;
2835 	struct fcp_reportlun_resp	*report_lun;
2836 	uint8_t			reconfig_needed = FALSE;
2837 	uint8_t			lun_exists = FALSE;
2838 
2839 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2840 
2841 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2842 	    fpkt->pkt_datalen);
2843 
2844 	/* get number of luns (which is supplied as LUNS * 8) */
2845 	num_luns = BE_32(report_lun->num_lun) >> 3;
2846 
2847 	/*
2848 	 * Figure out exactly how many lun strings our response buffer
2849 	 * can hold.
2850 	 */
2851 	lun_buflen = (fpkt->pkt_datalen -
2852 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
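
	/*
	 * The assumption above: the REPORT LUNS response begins with an
	 * 8-byte header (a 32-bit LUN list length plus a 32-bit reserved
	 * word) followed by 8-byte LUN entries, so the division yields how
	 * many entries the response buffer can actually hold.
	 */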
2853 
2854 	/*
2855 	 * Is our response buffer full or not? We don't want to
2856 	 * potentially walk beyond the number of luns we have.
2857 	 */
2858 	if (num_luns <= lun_buflen) {
2859 		actual_luns = num_luns;
2860 	} else {
2861 		actual_luns = lun_buflen;
2862 	}
2863 
2864 	mutex_enter(&ptgt->tgt_mutex);
2865 
2866 	/* Scan each lun to see if we have masked it. */
2867 	num_masked_luns = 0;
2868 	if (fcp_lun_blacklist != NULL) {
2869 		for (i = 0; i < actual_luns; i++) {
2870 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2871 			switch (lun_string[0] & 0xC0) {
2872 			case FCP_LUN_ADDRESSING:
2873 			case FCP_PD_ADDRESSING:
2874 				lun_num = ((lun_string[0] & 0x3F) << 8)
2875 				    | lun_string[1];
2876 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2877 				    lun_num) == TRUE) {
2878 					num_masked_luns++;
2879 				}
2880 				break;
2881 			default:
2882 				break;
2883 			}
2884 		}
2885 	}
2886 
2887 	/*
2888 	 * The quick and easy check.  If the number of LUNs reported
2889 	 * doesn't match the number we currently know about, we need
2890 	 * to reconfigure.
2891 	 */
2892 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2893 		mutex_exit(&ptgt->tgt_mutex);
2894 		kmem_free(report_lun, fpkt->pkt_datalen);
2895 		return (TRUE);
2896 	}
2897 
2898 	/*
2899 	 * If the quick and easy check doesn't turn up anything, we walk
2900 	 * the list of luns from the REPORT_LUN response and look for
2901 	 * any luns we don't know about.  If we find one, we know we need
2902 	 * to reconfigure. We will skip LUNs that are masked because of the
2903 	 * blacklist.
2904 	 */
2905 	for (i = 0; i < actual_luns; i++) {
2906 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2907 		lun_exists = FALSE;
2908 		switch (lun_string[0] & 0xC0) {
2909 		case FCP_LUN_ADDRESSING:
2910 		case FCP_PD_ADDRESSING:
2911 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2912 
2913 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2914 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2915 				lun_exists = TRUE;
2916 				break;
2917 			}
2918 
2919 			for (plun = ptgt->tgt_lun; plun;
2920 			    plun = plun->lun_next) {
2921 				if (plun->lun_num == lun_num) {
2922 					lun_exists = TRUE;
2923 					break;
2924 				}
2925 			}
2926 			break;
2927 		default:
2928 			break;
2929 		}
2930 
2931 		if (lun_exists == FALSE) {
2932 			reconfig_needed = TRUE;
2933 			break;
2934 		}
2935 	}
2936 
2937 	mutex_exit(&ptgt->tgt_mutex);
2938 	kmem_free(report_lun, fpkt->pkt_datalen);
2939 
2940 	return (reconfig_needed);
2941 }
2942 
2943 /*
2944  * This function is called by fcp_handle_page83 and uses inquiry response data
2945  * stored in plun->lun_inq to determine whether or not a device is a member of
 * the fcp_symmetric_disk_table. We return 0 if it is in the table,
2947  * otherwise 1.
2948  */
2949 static int
2950 fcp_symmetric_device_probe(struct fcp_lun *plun)
2951 {
2952 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2953 	char			*devidptr;
2954 	int			i, len;
2955 
2956 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2957 		devidptr = fcp_symmetric_disk_table[i];
2958 		len = (int)strlen(devidptr);
2959 
2960 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2961 			return (0);
2962 		}
2963 	}
2964 	return (1);
2965 }
2966 
2967 
2968 /*
2969  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
 * It basically returns the current count of state change callbacks,
 * i.e. the value of port_link_cnt.
2972  *
2973  * INPUT:
2974  *   fcp_ioctl.fp_minor -> The minor # of the fp port
2975  *   fcp_ioctl.listlen	-> 1
2976  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
2977  */
2978 /*ARGSUSED2*/
2979 static int
2980 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
2981 {
2982 	int			ret;
2983 	uint32_t		link_cnt;
2984 	struct fcp_ioctl	fioctl;
2985 	struct fcp_port	*pptr = NULL;
2986 
2987 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
2988 	    &pptr)) != 0) {
2989 		return (ret);
2990 	}
2991 
2992 	ASSERT(pptr != NULL);
2993 
2994 	if (fioctl.listlen != 1) {
2995 		return (EINVAL);
2996 	}
2997 
2998 	mutex_enter(&pptr->port_mutex);
2999 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3000 		mutex_exit(&pptr->port_mutex);
3001 		return (ENXIO);
3002 	}
3003 
3004 	/*
3005 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * when fcp initially attaches to the port and there is nothing
	 * hanging off the port, or if there was a repeat offline state change
	 * callback (refer to fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how
	 * we will differentiate the 2 cases.
3011 	 */
3012 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3013 		mutex_exit(&pptr->port_mutex);
3014 		return (ENXIO);
3015 	}
3016 
3017 	link_cnt = pptr->port_link_cnt;
3018 	mutex_exit(&pptr->port_mutex);
3019 
3020 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3021 		return (EFAULT);
3022 	}
3023 
3024 #ifdef	_MULTI_DATAMODEL
3025 	switch (ddi_model_convert_from(mode & FMODELS)) {
3026 	case DDI_MODEL_ILP32: {
3027 		struct fcp32_ioctl f32_ioctl;
3028 
3029 		f32_ioctl.fp_minor = fioctl.fp_minor;
3030 		f32_ioctl.listlen = fioctl.listlen;
3031 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3032 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3033 		    sizeof (struct fcp32_ioctl), mode)) {
3034 			return (EFAULT);
3035 		}
3036 		break;
3037 	}
3038 	case DDI_MODEL_NONE:
3039 		if (ddi_copyout((void *)&fioctl, (void *)data,
3040 		    sizeof (struct fcp_ioctl), mode)) {
3041 			return (EFAULT);
3042 		}
3043 		break;
3044 	}
3045 #else	/* _MULTI_DATAMODEL */
3046 
3047 	if (ddi_copyout((void *)&fioctl, (void *)data,
3048 	    sizeof (struct fcp_ioctl), mode)) {
3049 		return (EFAULT);
3050 	}
3051 #endif	/* _MULTI_DATAMODEL */
3052 
3053 	return (0);
3054 }
3055 
3056 /*
3057  * This function copies the fcp_ioctl structure passed in from user land
3058  * into kernel land. Handles 32 bit applications.
3059  */
3060 /*ARGSUSED*/
3061 static int
3062 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3063     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3064 {
3065 	struct fcp_port	*t_pptr;
3066 
3067 #ifdef	_MULTI_DATAMODEL
3068 	switch (ddi_model_convert_from(mode & FMODELS)) {
3069 	case DDI_MODEL_ILP32: {
3070 		struct fcp32_ioctl f32_ioctl;
3071 
3072 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3073 		    sizeof (struct fcp32_ioctl), mode)) {
3074 			return (EFAULT);
3075 		}
3076 		fioctl->fp_minor = f32_ioctl.fp_minor;
3077 		fioctl->listlen = f32_ioctl.listlen;
3078 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3079 		break;
3080 	}
3081 	case DDI_MODEL_NONE:
3082 		if (ddi_copyin((void *)data, (void *)fioctl,
3083 		    sizeof (struct fcp_ioctl), mode)) {
3084 			return (EFAULT);
3085 		}
3086 		break;
3087 	}
3088 
3089 #else	/* _MULTI_DATAMODEL */
3090 	if (ddi_copyin((void *)data, (void *)fioctl,
3091 	    sizeof (struct fcp_ioctl), mode)) {
3092 		return (EFAULT);
3093 	}
3094 #endif	/* _MULTI_DATAMODEL */
3095 
3096 	/*
3097 	 * Right now we can assume that the minor number matches with
3098 	 * this instance of fp. If this changes we will need to
3099 	 * revisit this logic.
3100 	 */
3101 	mutex_enter(&fcp_global_mutex);
3102 	t_pptr = fcp_port_head;
3103 	while (t_pptr) {
3104 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3105 			break;
3106 		} else {
3107 			t_pptr = t_pptr->port_next;
3108 		}
3109 	}
3110 	*pptr = t_pptr;
3111 	mutex_exit(&fcp_global_mutex);
3112 	if (t_pptr == NULL) {
3113 		return (ENXIO);
3114 	}
3115 
3116 	return (0);
3117 }
3118 
3119 /*
3120  *     Function: fcp_port_create_tgt
3121  *
 *  Description: As the name suggests, this function creates the target context
 *		 specified by the WWN provided by the caller.  If the
 *		 creation goes well and the target is known by fp/fctl, a PLOGI
 *		 followed by a PRLI is issued.
3126  *
3127  *     Argument: pptr		fcp port structure
3128  *		 pwwn		WWN of the target
3129  *		 ret_val	Address of the return code.  It could be:
3130  *				EIO, ENOMEM or 0.
3131  *		 fc_status	PLOGI or PRLI status completion
3132  *		 fc_pkt_state	PLOGI or PRLI state completion
3133  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3134  *		 fc_pkt_action	PLOGI or PRLI action completion
3135  *
3136  * Return Value: NULL if it failed
3137  *		 Target structure address if it succeeds
3138  */
3139 static struct fcp_tgt *
3140 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3141     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3142 {
3143 	struct fcp_tgt	*ptgt = NULL;
3144 	fc_portmap_t		devlist;
3145 	int			lcount;
3146 	int			error;
3147 
3148 	*ret_val = 0;
3149 
3150 	/*
3151 	 * Check FC port device & get port map
3152 	 */
3153 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3154 	    &error, 1) == NULL) {
3155 		*ret_val = EIO;
3156 	} else {
3157 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3158 		    &devlist) != FC_SUCCESS) {
3159 			*ret_val = EIO;
3160 		}
3161 	}
3162 
3163 	/* Set port map flags */
3164 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3165 
3166 	/* Allocate target */
3167 	if (*ret_val == 0) {
3168 		lcount = pptr->port_link_cnt;
3169 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3170 		if (ptgt == NULL) {
3171 			fcp_log(CE_WARN, pptr->port_dip,
3172 			    "!FC target allocation failed");
3173 			*ret_val = ENOMEM;
3174 		} else {
3175 			/* Setup target */
3176 			mutex_enter(&ptgt->tgt_mutex);
3177 
3178 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3179 			ptgt->tgt_tmp_cnt	= 1;
3180 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3181 			ptgt->tgt_hard_addr	=
3182 			    devlist.map_hard_addr.hard_addr;
3183 			ptgt->tgt_pd_handle	= devlist.map_pd;
3184 			ptgt->tgt_fca_dev	= NULL;
3185 
3186 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3187 			    FC_WWN_SIZE);
3188 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3189 			    FC_WWN_SIZE);
3190 
3191 			mutex_exit(&ptgt->tgt_mutex);
3192 		}
3193 	}
3194 
3195 	/* Release global mutex for PLOGI and PRLI */
3196 	mutex_exit(&fcp_global_mutex);
3197 
3198 	/* Send PLOGI (If necessary) */
3199 	if (*ret_val == 0) {
3200 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3201 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3202 	}
3203 
3204 	/* Send PRLI (If necessary) */
3205 	if (*ret_val == 0) {
3206 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3207 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3208 	}
3209 
3210 	mutex_enter(&fcp_global_mutex);
3211 
3212 	return (ptgt);
3213 }
3214 
3215 /*
3216  *     Function: fcp_tgt_send_plogi
3217  *
3218  *  Description: This function sends a PLOGI to the target specified by the
3219  *		 caller and waits till it completes.
3220  *
3221  *     Argument: ptgt		Target to send the plogi to.
3222  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3223  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3224  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3225  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3226  *
3227  * Return Value: 0
3228  *		 ENOMEM
3229  *		 EIO
3230  *
3231  *	Context: User context.
3232  */
3233 static int
3234 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3235     int *fc_pkt_reason, int *fc_pkt_action)
3236 {
3237 	struct fcp_port	*pptr;
3238 	struct fcp_ipkt	*icmd;
3239 	struct fc_packet	*fpkt;
3240 	fc_frame_hdr_t		*hp;
3241 	struct la_els_logi	logi;
3242 	int			tcount;
3243 	int			lcount;
3244 	int			ret, login_retval = ~FC_SUCCESS;
3245 
3246 	ret = 0;
3247 
3248 	pptr = ptgt->tgt_port;
3249 
3250 	lcount = pptr->port_link_cnt;
3251 	tcount = ptgt->tgt_change_cnt;
3252 
3253 	/* Alloc internal packet */
3254 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3255 	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
3256 	    FC_INVALID_RSCN_COUNT);
3257 
3258 	if (icmd == NULL) {
3259 		ret = ENOMEM;
3260 	} else {
3261 		/*
3262 		 * Setup internal packet as sema sync
3263 		 */
3264 		fcp_ipkt_sema_init(icmd);
3265 
3266 		/*
3267 		 * Setup internal packet (icmd)
3268 		 */
3269 		icmd->ipkt_lun		= NULL;
3270 		icmd->ipkt_restart	= 0;
3271 		icmd->ipkt_retries	= 0;
3272 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3273 
3274 		/*
3275 		 * Setup fc_packet
3276 		 */
3277 		fpkt = icmd->ipkt_fpkt;
3278 
3279 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3280 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3281 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3282 
3283 		/*
3284 		 * Setup FC frame header
3285 		 */
3286 		hp = &fpkt->pkt_cmd_fhdr;
3287 
3288 		hp->s_id	= pptr->port_id;	/* source ID */
3289 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3290 		hp->r_ctl	= R_CTL_ELS_REQ;
3291 		hp->type	= FC_TYPE_EXTENDED_LS;
3292 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3293 		hp->seq_id	= 0;
3294 		hp->rsvd	= 0;
3295 		hp->df_ctl	= 0;
3296 		hp->seq_cnt	= 0;
3297 		hp->ox_id	= 0xffff;		/* i.e. none */
3298 		hp->rx_id	= 0xffff;		/* i.e. none */
3299 		hp->ro		= 0;
3300 
3301 		/*
3302 		 * Setup PLOGI
3303 		 */
3304 		bzero(&logi, sizeof (struct la_els_logi));
3305 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3306 
3307 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3308 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3309 
3310 		/*
3311 		 * Send PLOGI
3312 		 */
3313 		*fc_status = login_retval =
3314 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3315 		if (*fc_status != FC_SUCCESS) {
3316 			ret = EIO;
3317 		}
3318 	}
3319 
3320 	/*
3321 	 * Wait for completion
3322 	 */
3323 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3324 		ret = fcp_ipkt_sema_wait(icmd);
3325 
3326 		*fc_pkt_state	= fpkt->pkt_state;
3327 		*fc_pkt_reason	= fpkt->pkt_reason;
3328 		*fc_pkt_action	= fpkt->pkt_action;
3329 	}
3330 
3331 	/*
3332 	 * Cleanup transport data structures if icmd was alloc-ed AND if there
3333 	 * is going to be no callback (i.e if fc_ulp_login() failed).
3334 	 * Otherwise, cleanup happens in callback routine.
3335 	 */
3336 	if (icmd != NULL) {
3337 		fcp_ipkt_sema_cleanup(icmd);
3338 	}
3339 
3340 	return (ret);
3341 }
3342 
3343 /*
3344  *     Function: fcp_tgt_send_prli
3345  *
3346  *  Description: Does nothing as of today.
3347  *
3348  *     Argument: ptgt		Target to send the prli to.
3349  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3350  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3351  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3352  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3353  *
3354  * Return Value: 0
3355  */
3356 /*ARGSUSED*/
3357 static int
3358 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3359     int *fc_pkt_reason, int *fc_pkt_action)
3360 {
3361 	return (0);
3362 }
3363 
3364 /*
3365  *     Function: fcp_ipkt_sema_init
3366  *
3367  *  Description: Initializes the semaphore contained in the internal packet.
3368  *
3369  *     Argument: icmd	Internal packet the semaphore of which must be
3370  *			initialized.
3371  *
3372  * Return Value: None
3373  *
3374  *	Context: User context only.
3375  */
3376 static void
3377 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3378 {
3379 	struct fc_packet	*fpkt;
3380 
3381 	fpkt = icmd->ipkt_fpkt;
3382 
3383 	/* Create semaphore for sync */
3384 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
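
	/*
	 * The semaphore starts at zero, so fcp_ipkt_sema_wait() will block
	 * in sema_p() until the transport completion callback posts it via
	 * sema_v() in fcp_ipkt_sema_callback().
	 */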
3385 
3386 	/* Setup the completion callback */
3387 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3388 }
3389 
3390 /*
3391  *     Function: fcp_ipkt_sema_wait
3392  *
3393  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3394  *		 semaphore is released in the callback.
3395  *
3396  *     Argument: icmd	Internal packet to wait on for completion.
3397  *
3398  * Return Value: 0
3399  *		 EIO
3400  *		 EBUSY
3401  *		 EAGAIN
3402  *
3403  *	Context: User context only.
3404  *
 * This function converts the pkt_state field of the fc_packet embedded in
 * the internal packet (icmd) into the errno value it returns.
3407  */
3408 static int
3409 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3410 {
3411 	struct fc_packet	*fpkt;
3412 	int	ret;
3413 
3414 	ret = EIO;
3415 	fpkt = icmd->ipkt_fpkt;
3416 
3417 	/*
3418 	 * Wait on semaphore
3419 	 */
3420 	sema_p(&(icmd->ipkt_sema));
3421 
3422 	/*
3423 	 * Check the status of the FC packet
3424 	 */
3425 	switch (fpkt->pkt_state) {
3426 	case FC_PKT_SUCCESS:
3427 		ret = 0;
3428 		break;
3429 	case FC_PKT_LOCAL_RJT:
3430 		switch (fpkt->pkt_reason) {
3431 		case FC_REASON_SEQ_TIMEOUT:
3432 		case FC_REASON_RX_BUF_TIMEOUT:
3433 			ret = EAGAIN;
3434 			break;
3435 		case FC_REASON_PKT_BUSY:
3436 			ret = EBUSY;
3437 			break;
3438 		}
3439 		break;
3440 	case FC_PKT_TIMEOUT:
3441 		ret = EAGAIN;
3442 		break;
3443 	case FC_PKT_LOCAL_BSY:
3444 	case FC_PKT_TRAN_BSY:
3445 	case FC_PKT_NPORT_BSY:
3446 	case FC_PKT_FABRIC_BSY:
3447 		ret = EBUSY;
3448 		break;
3449 	case FC_PKT_LS_RJT:
3450 	case FC_PKT_BA_RJT:
3451 		switch (fpkt->pkt_reason) {
3452 		case FC_REASON_LOGICAL_BSY:
3453 			ret = EBUSY;
3454 			break;
3455 		}
3456 		break;
3457 	case FC_PKT_FS_RJT:
3458 		switch (fpkt->pkt_reason) {
3459 		case FC_REASON_FS_LOGICAL_BUSY:
3460 			ret = EBUSY;
3461 			break;
3462 		}
3463 		break;
3464 	}
3465 
3466 	return (ret);
3467 }
3468 
3469 /*
3470  *     Function: fcp_ipkt_sema_callback
3471  *
3472  *  Description: Registered as the completion callback function for the FC
3473  *		 transport when the ipkt semaphore is used for sync. This will
3474  *		 cleanup the used data structures, if necessary and wake up
3475  *		 the user thread to complete the transaction.
3476  *
3477  *     Argument: fpkt	FC packet (points to the icmd)
3478  *
3479  * Return Value: None
3480  *
3481  *	Context: User context only
3482  */
3483 static void
3484 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3485 {
3486 	struct fcp_ipkt	*icmd;
3487 
3488 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3489 
3490 	/*
3491 	 * Wake up user thread
3492 	 */
3493 	sema_v(&(icmd->ipkt_sema));
3494 }
3495 
3496 /*
3497  *     Function: fcp_ipkt_sema_cleanup
3498  *
3499  *  Description: Called to cleanup (if necessary) the data structures used
3500  *		 when ipkt sema is used for sync.  This function will detect
3501  *		 whether the caller is the last thread (via counter) and
3502  *		 cleanup only if necessary.
3503  *
3504  *     Argument: icmd	Internal command packet
3505  *
3506  * Return Value: None
3507  *
3508  *	Context: User context only
3509  */
3510 static void
3511 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3512 {
3513 	struct fcp_tgt	*ptgt;
3514 	struct fcp_port	*pptr;
3515 
3516 	ptgt = icmd->ipkt_tgt;
3517 	pptr = icmd->ipkt_port;
3518 
3519 	/*
3520 	 * Acquire data structure
3521 	 */
3522 	mutex_enter(&ptgt->tgt_mutex);
3523 
3524 	/*
3525 	 * Destroy semaphore
3526 	 */
3527 	sema_destroy(&(icmd->ipkt_sema));
3528 
3529 	/*
3530 	 * Cleanup internal packet
3531 	 */
3532 	mutex_exit(&ptgt->tgt_mutex);
3533 	fcp_icmd_free(pptr, icmd);
3534 }
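
/*
 * For reference, a caller that uses these semaphore helpers for a
 * synchronous internal command roughly follows the sequence below.  This is
 * only a sketch: the exact point at which the semaphore and the completion
 * routine are wired up, the error handling and the link/target change
 * counters all vary from caller to caller.
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, 0, 0,
 *	    lcount, tcount, cause, rscn_count);
 *	icmd->ipkt_fpkt->pkt_comp = fcp_ipkt_sema_callback;
 *	(void) fc_ulp_transport(pptr->port_fp_handle, icmd->ipkt_fpkt);
 *	ret = fcp_ipkt_sema_wait(icmd);		sleeps until the callback runs
 *	fcp_ipkt_sema_cleanup(icmd);		destroys the sema, frees icmd
 */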
3535 
3536 /*
3537  *     Function: fcp_port_attach
3538  *
3539  *  Description: Called by the transport framework to resume, suspend or
3540  *		 attach a new port.
3541  *
3542  *     Argument: ulph		Port handle
3543  *		 *pinfo		Port information
3544  *		 cmd		Command
3545  *		 s_id		Port ID
3546  *
3547  * Return Value: FC_FAILURE or FC_SUCCESS
3548  */
3549 /*ARGSUSED*/
3550 static int
3551 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3552     fc_attach_cmd_t cmd, uint32_t s_id)
3553 {
3554 	int	instance;
3555 	int	res = FC_FAILURE; /* default result */
3556 
3557 	ASSERT(pinfo != NULL);
3558 
3559 	instance = ddi_get_instance(pinfo->port_dip);
3560 
3561 	switch (cmd) {
3562 	case FC_CMD_ATTACH:
3563 		/*
3564 		 * this port instance attaching for the first time (or after
3565 		 * being detached before)
3566 		 */
3567 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3568 		    instance) == DDI_SUCCESS) {
3569 			res = FC_SUCCESS;
3570 		} else {
3571 			ASSERT(ddi_get_soft_state(fcp_softstate,
3572 			    instance) == NULL);
3573 		}
3574 		break;
3575 
3576 	case FC_CMD_RESUME:
3577 	case FC_CMD_POWER_UP:
3578 		/*
		 * this port instance was attached, then suspended, and
		 * will now be resumed
3581 		 */
3582 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3583 		    instance) == DDI_SUCCESS) {
3584 			res = FC_SUCCESS;
3585 		}
3586 		break;
3587 
3588 	default:
3589 		/* shouldn't happen */
3590 		FCP_TRACE(fcp_logq, "fcp",
3591 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "port_attach: unknown command: %d", cmd);
3593 		break;
3594 	}
3595 
3596 	/* return result */
3597 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3598 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3599 
3600 	return (res);
3601 }
3602 
3603 
3604 /*
3605  * detach or suspend this port instance
3606  *
3607  * acquires and releases the global mutex
3608  *
3609  * acquires and releases the mutex for this port
3610  *
3611  * acquires and releases the hotplug mutex for this port
3612  */
3613 /*ARGSUSED*/
3614 static int
3615 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3616     fc_detach_cmd_t cmd)
3617 {
3618 	int			flag;
3619 	int			instance;
3620 	struct fcp_port		*pptr;
3621 
3622 	instance = ddi_get_instance(info->port_dip);
3623 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3624 
3625 	switch (cmd) {
3626 	case FC_CMD_SUSPEND:
3627 		FCP_DTRACE(fcp_logq, "fcp",
3628 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3629 		    "port suspend called for port %d", instance);
3630 		flag = FCP_STATE_SUSPENDED;
3631 		break;
3632 
3633 	case FC_CMD_POWER_DOWN:
3634 		FCP_DTRACE(fcp_logq, "fcp",
3635 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3636 		    "port power down called for port %d", instance);
3637 		flag = FCP_STATE_POWER_DOWN;
3638 		break;
3639 
3640 	case FC_CMD_DETACH:
3641 		FCP_DTRACE(fcp_logq, "fcp",
3642 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3643 		    "port detach called for port %d", instance);
3644 		flag = FCP_STATE_DETACHING;
3645 		break;
3646 
3647 	default:
3648 		/* shouldn't happen */
3649 		return (FC_FAILURE);
3650 	}
3651 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3652 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3653 
3654 	return (fcp_handle_port_detach(pptr, flag, instance));
3655 }
3656 
3657 
3658 /*
3659  * called for ioctls on the transport's devctl interface, and the transport
3660  * has passed it to us
3661  *
3662  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3663  *
3664  * return FC_SUCCESS if we decide to claim the ioctl,
3665  * else return FC_UNCLAIMED
3666  *
3667  * *rval is set iff we decide to claim the ioctl
3668  */
3669 /*ARGSUSED*/
3670 static int
3671 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3672     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3673 {
3674 	int			retval = FC_UNCLAIMED;	/* return value */
3675 	struct fcp_port		*pptr = NULL;		/* our soft state */
3676 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3677 	dev_info_t		*cdip;
3678 	mdi_pathinfo_t		*pip = NULL;
3679 	char			*ndi_nm;		/* NDI name */
3680 	char			*ndi_addr;		/* NDI addr */
3681 	int			is_mpxio, circ;
3682 	int			devi_entered = 0;
3683 	time_t			end_time;
3684 
3685 	ASSERT(rval != NULL);
3686 
3687 	FCP_DTRACE(fcp_logq, "fcp",
3688 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3690 
3691 	/* if already claimed then forget it */
3692 	if (claimed) {
3693 		/*
3694 		 * for now, if this ioctl has already been claimed, then
3695 		 * we just ignore it
3696 		 */
3697 		return (retval);
3698 	}
3699 
3700 	/* get our port info */
3701 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3702 		fcp_log(CE_WARN, NULL,
		    "!fcp:Invalid port handle in ioctl");
3704 		*rval = ENXIO;
3705 		return (retval);
3706 	}
3707 	is_mpxio = pptr->port_mpxio;
3708 
3709 	switch (cmd) {
3710 	case DEVCTL_BUS_GETSTATE:
3711 	case DEVCTL_BUS_QUIESCE:
3712 	case DEVCTL_BUS_UNQUIESCE:
3713 	case DEVCTL_BUS_RESET:
3714 	case DEVCTL_BUS_RESETALL:
3715 
3716 	case DEVCTL_BUS_DEV_CREATE:
3717 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3718 			return (retval);
3719 		}
3720 		break;
3721 
3722 	case DEVCTL_DEVICE_GETSTATE:
3723 	case DEVCTL_DEVICE_OFFLINE:
3724 	case DEVCTL_DEVICE_ONLINE:
3725 	case DEVCTL_DEVICE_REMOVE:
3726 	case DEVCTL_DEVICE_RESET:
3727 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3728 			return (retval);
3729 		}
3730 
3731 		ASSERT(dcp != NULL);
3732 
3733 		/* ensure we have a name and address */
3734 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3735 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3736 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3737 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3738 			    "ioctl: can't get name (%s) or addr (%s)",
3739 			    ndi_nm ? ndi_nm : "<null ptr>",
3740 			    ndi_addr ? ndi_addr : "<null ptr>");
3741 			ndi_dc_freehdl(dcp);
3742 			return (retval);
3743 		}
3744 
3745 
3746 		/* get our child's DIP */
3747 		ASSERT(pptr != NULL);
3748 		if (is_mpxio) {
3749 			mdi_devi_enter(pptr->port_dip, &circ);
3750 		} else {
3751 			ndi_devi_enter(pptr->port_dip, &circ);
3752 		}
3753 		devi_entered = 1;
3754 
3755 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3756 		    ndi_addr)) == NULL) {
3757 			/* Look for virtually enumerated devices. */
3758 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3759 			if (pip == NULL ||
3760 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3761 				*rval = ENXIO;
3762 				goto out;
3763 			}
3764 		}
3765 		break;
3766 
3767 	default:
3768 		*rval = ENOTTY;
3769 		return (retval);
3770 	}
3771 
3772 	/* this ioctl is ours -- process it */
3773 
3774 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3775 
3776 	/* we assume it will be a success; else we'll set error value */
3777 	*rval = 0;
3778 
3779 
3780 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3781 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3782 	    "ioctl: claiming this one");
3783 
3784 	/* handle ioctls now */
3785 	switch (cmd) {
3786 	case DEVCTL_DEVICE_GETSTATE:
3787 		ASSERT(cdip != NULL);
3788 		ASSERT(dcp != NULL);
3789 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3790 			*rval = EFAULT;
3791 		}
3792 		break;
3793 
3794 	case DEVCTL_DEVICE_REMOVE:
3795 	case DEVCTL_DEVICE_OFFLINE: {
3796 		int			flag = 0;
3797 		int			lcount;
3798 		int			tcount;
3799 		struct fcp_pkt	*head = NULL;
3800 		struct fcp_lun	*plun;
3801 		child_info_t		*cip = CIP(cdip);
3802 		int			all = 1;
3803 		struct fcp_lun	*tplun;
3804 		struct fcp_tgt	*ptgt;
3805 
3806 		ASSERT(pptr != NULL);
3807 		ASSERT(cdip != NULL);
3808 
3809 		mutex_enter(&pptr->port_mutex);
3810 		if (pip != NULL) {
3811 			cip = CIP(pip);
3812 		}
3813 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3814 			mutex_exit(&pptr->port_mutex);
3815 			*rval = ENXIO;
3816 			break;
3817 		}
3818 
3819 		head = fcp_scan_commands(plun);
3820 		if (head != NULL) {
3821 			fcp_abort_commands(head, LUN_PORT);
3822 		}
3823 		lcount = pptr->port_link_cnt;
3824 		tcount = plun->lun_tgt->tgt_change_cnt;
3825 		mutex_exit(&pptr->port_mutex);
3826 
3827 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3828 			flag = NDI_DEVI_REMOVE;
3829 		}
3830 
3831 		if (is_mpxio) {
3832 			mdi_devi_exit(pptr->port_dip, circ);
3833 		} else {
3834 			ndi_devi_exit(pptr->port_dip, circ);
3835 		}
3836 		devi_entered = 0;
3837 
3838 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3839 		    FCP_OFFLINE, lcount, tcount, flag);
3840 
3841 		if (*rval != NDI_SUCCESS) {
3842 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3843 			break;
3844 		}
3845 
3846 		fcp_update_offline_flags(plun);
3847 
3848 		ptgt = plun->lun_tgt;
3849 		mutex_enter(&ptgt->tgt_mutex);
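		/*
		 * Check whether every LUN on this target is now offline;
		 * if so, the target's node state is reset below.
		 */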
3850 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3851 		    tplun->lun_next) {
3852 			mutex_enter(&tplun->lun_mutex);
3853 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3854 				all = 0;
3855 			}
3856 			mutex_exit(&tplun->lun_mutex);
3857 		}
3858 
3859 		if (all) {
3860 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3861 			/*
3862 			 * The user is unconfiguring/offlining the device.
			 * If the topology is fabric and auto configuration is
			 * enabled, then make sure the user is the only one who
			 * can reconfigure the device.
3866 			 */
3867 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3868 			    fcp_enable_auto_configuration) {
3869 				ptgt->tgt_manual_config_only = 1;
3870 			}
3871 		}
3872 		mutex_exit(&ptgt->tgt_mutex);
3873 		break;
3874 	}
3875 
3876 	case DEVCTL_DEVICE_ONLINE: {
3877 		int			lcount;
3878 		int			tcount;
3879 		struct fcp_lun	*plun;
3880 		child_info_t		*cip = CIP(cdip);
3881 
3882 		ASSERT(cdip != NULL);
3883 		ASSERT(pptr != NULL);
3884 
3885 		mutex_enter(&pptr->port_mutex);
3886 		if (pip != NULL) {
3887 			cip = CIP(pip);
3888 		}
3889 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3890 			mutex_exit(&pptr->port_mutex);
3891 			*rval = ENXIO;
3892 			break;
3893 		}
3894 		lcount = pptr->port_link_cnt;
3895 		tcount = plun->lun_tgt->tgt_change_cnt;
3896 		mutex_exit(&pptr->port_mutex);
3897 
3898 		/*
3899 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3900 		 * to allow the device attach to occur when the device is
3901 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3902 		 * from the scsi_probe()).
3903 		 */
3904 		mutex_enter(&LUN_TGT->tgt_mutex);
3905 		plun->lun_state |= FCP_LUN_ONLINING;
3906 		mutex_exit(&LUN_TGT->tgt_mutex);
3907 
3908 		if (is_mpxio) {
3909 			mdi_devi_exit(pptr->port_dip, circ);
3910 		} else {
3911 			ndi_devi_exit(pptr->port_dip, circ);
3912 		}
3913 		devi_entered = 0;
3914 
3915 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3916 		    FCP_ONLINE, lcount, tcount, 0);
3917 
3918 		if (*rval != NDI_SUCCESS) {
3919 			/* Reset the FCP_LUN_ONLINING bit */
3920 			mutex_enter(&LUN_TGT->tgt_mutex);
3921 			plun->lun_state &= ~FCP_LUN_ONLINING;
3922 			mutex_exit(&LUN_TGT->tgt_mutex);
3923 			*rval = EIO;
3924 			break;
3925 		}
3926 		mutex_enter(&LUN_TGT->tgt_mutex);
3927 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3928 		    FCP_LUN_ONLINING);
3929 		mutex_exit(&LUN_TGT->tgt_mutex);
3930 		break;
3931 	}
3932 
3933 	case DEVCTL_BUS_DEV_CREATE: {
3934 		uchar_t			*bytes = NULL;
3935 		uint_t			nbytes;
3936 		struct fcp_tgt		*ptgt = NULL;
3937 		struct fcp_lun		*plun = NULL;
3938 		dev_info_t		*useless_dip = NULL;
3939 
3940 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3941 		    DEVCTL_CONSTRUCT, &useless_dip);
3942 		if (*rval != 0 || useless_dip == NULL) {
3943 			break;
3944 		}
3945 
3946 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3947 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3948 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3949 			*rval = EINVAL;
3950 			(void) ndi_devi_free(useless_dip);
3951 			if (bytes != NULL) {
3952 				ddi_prop_free(bytes);
3953 			}
3954 			break;
3955 		}
3956 
3957 		*rval = fcp_create_on_demand(pptr, bytes);
3958 		if (*rval == 0) {
3959 			mutex_enter(&pptr->port_mutex);
3960 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3961 			if (ptgt) {
3962 				/*
3963 				 * We now have a pointer to the target that
				 * was created.  Let's point to the first LUN on
3965 				 * this new target.
3966 				 */
3967 				mutex_enter(&ptgt->tgt_mutex);
3968 
3969 				plun = ptgt->tgt_lun;
3970 				/*
3971 				 * There may be stale/offline LUN entries on
3972 				 * this list (this is by design) and so we have
3973 				 * to make sure we point to the first online
3974 				 * LUN
3975 				 */
3976 				while (plun &&
3977 				    plun->lun_state & FCP_LUN_OFFLINE) {
3978 					plun = plun->lun_next;
3979 				}
3980 
3981 				mutex_exit(&ptgt->tgt_mutex);
3982 			}
3983 			mutex_exit(&pptr->port_mutex);
3984 		}
3985 
3986 		if (*rval == 0 && ptgt && plun) {
3987 			mutex_enter(&plun->lun_mutex);
3988 			/*
3989 			 * Allow up to fcp_lun_ready_retry seconds to
3990 			 * configure all the luns behind the target.
3991 			 *
3992 			 * The intent here is to allow targets with long
3993 			 * reboot/reset-recovery times to become available
3994 			 * while limiting the maximum wait time for an
3995 			 * unresponsive target.
3996 			 */
3997 			end_time = ddi_get_lbolt() +
3998 			    SEC_TO_TICK(fcp_lun_ready_retry);
3999 
4000 			while (ddi_get_lbolt() < end_time) {
4001 				retval = FC_SUCCESS;
4002 
4003 				/*
				 * The new NDI interfaces for on-demand creation
				 * are inflexible, so do some more work to pass on
				 * a path name of some LUN (the design is broken!)
4007 				 */
4008 				if (plun->lun_cip) {
4009 					if (plun->lun_mpxio == 0) {
4010 						cdip = DIP(plun->lun_cip);
4011 					} else {
4012 						cdip = mdi_pi_get_client(
4013 						    PIP(plun->lun_cip));
4014 					}
4015 					if (cdip == NULL) {
4016 						*rval = ENXIO;
4017 						break;
4018 					}
4019 
4020 					if (!i_ddi_devi_attached(cdip)) {
4021 						mutex_exit(&plun->lun_mutex);
4022 						delay(drv_usectohz(1000000));
4023 						mutex_enter(&plun->lun_mutex);
4024 					} else {
4025 						/*
						 * This LUN is ready; let's
4027 						 * check the next one.
4028 						 */
4029 						mutex_exit(&plun->lun_mutex);
4030 						plun = plun->lun_next;
4031 						while (plun && (plun->lun_state
4032 						    & FCP_LUN_OFFLINE)) {
4033 							plun = plun->lun_next;
4034 						}
4035 						if (!plun) {
4036 							break;
4037 						}
4038 						mutex_enter(&plun->lun_mutex);
4039 					}
4040 				} else {
4041 					/*
4042 					 * lun_cip field for a valid lun
4043 					 * should never be NULL. Fail the
4044 					 * command.
4045 					 */
4046 					*rval = ENXIO;
4047 					break;
4048 				}
4049 			}
4050 			if (plun) {
4051 				mutex_exit(&plun->lun_mutex);
4052 			} else {
4053 				char devnm[MAXNAMELEN];
4054 				int nmlen;
4055 
4056 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4057 				    ddi_node_name(cdip),
4058 				    ddi_get_name_addr(cdip));
4059 
4060 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4061 				    0) {
4062 					*rval = EFAULT;
4063 				}
4064 			}
4065 		} else {
4066 			int	i;
4067 			char	buf[25];
4068 
4069 			for (i = 0; i < FC_WWN_SIZE; i++) {
4070 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4071 			}
4072 
4073 			fcp_log(CE_WARN, pptr->port_dip,
4074 			    "!Failed to create nodes for pwwn=%s; error=%x",
4075 			    buf, *rval);
4076 		}
4077 
4078 		(void) ndi_devi_free(useless_dip);
4079 		ddi_prop_free(bytes);
4080 		break;
4081 	}
4082 
4083 	case DEVCTL_DEVICE_RESET: {
4084 		struct fcp_lun		*plun;
4085 		child_info_t		*cip = CIP(cdip);
4086 
4087 		ASSERT(cdip != NULL);
4088 		ASSERT(pptr != NULL);
4089 		mutex_enter(&pptr->port_mutex);
4090 		if (pip != NULL) {
4091 			cip = CIP(pip);
4092 		}
4093 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4094 			mutex_exit(&pptr->port_mutex);
4095 			*rval = ENXIO;
4096 			break;
4097 		}
4098 		mutex_exit(&pptr->port_mutex);
4099 
4100 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4101 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4102 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4103 
4104 			*rval = ENXIO;
4105 			break;
4106 		}
4107 
4108 		if (plun->lun_sd == NULL) {
4109 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4110 
4111 			*rval = ENXIO;
4112 			break;
4113 		}
4114 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4115 
4116 		/*
		 * pass the LUN's scsi_address so that fcp_scsi_reset can
		 * figure out which target to reset
4119 		 */
4120 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4121 		    RESET_TARGET) == FALSE) {
4122 			*rval = EIO;
4123 		}
4124 		break;
4125 	}
4126 
4127 	case DEVCTL_BUS_GETSTATE:
4128 		ASSERT(dcp != NULL);
4129 		ASSERT(pptr != NULL);
4130 		ASSERT(pptr->port_dip != NULL);
4131 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4132 		    NDI_SUCCESS) {
4133 			*rval = EFAULT;
4134 		}
4135 		break;
4136 
4137 	case DEVCTL_BUS_QUIESCE:
4138 	case DEVCTL_BUS_UNQUIESCE:
4139 		*rval = ENOTSUP;
4140 		break;
4141 
4142 	case DEVCTL_BUS_RESET:
4143 	case DEVCTL_BUS_RESETALL:
4144 		ASSERT(pptr != NULL);
4145 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4146 		break;
4147 
4148 	default:
4149 		ASSERT(dcp != NULL);
4150 		*rval = ENOTTY;
4151 		break;
4152 	}
4153 
4154 	/* all done -- clean up and return */
4155 out:	if (devi_entered) {
4156 		if (is_mpxio) {
4157 			mdi_devi_exit(pptr->port_dip, circ);
4158 		} else {
4159 			ndi_devi_exit(pptr->port_dip, circ);
4160 		}
4161 	}
4162 
4163 	if (dcp != NULL) {
4164 		ndi_dc_freehdl(dcp);
4165 	}
4166 
4167 	return (retval);
4168 }
4169 
4170 
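/*
 *     Function: fcp_els_callback
 *
 *  Description: Called by the transport when an unsolicited ELS frame is
 *		 received.  If the port is not detaching, suspended or powered
 *		 down and the frame is an ELS request carrying a PRLI, the
 *		 request is handed to fcp_unsol_prli() and the buffer is
 *		 claimed; everything else is left unclaimed.
 *
 *     Argument: ulph		fp/fctl port handle
 *		 port_handle	Opaque handle identifying the FCP port
 *		 *buf		Unsolicited buffer carrying the frame
 *		 claimed	Non-zero if another ULP already claimed it
 *
 * Return Value: FC_SUCCESS	The request was claimed
 *		 FC_UNCLAIMED	The request was not claimed
 */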
4171 /*ARGSUSED*/
4172 static int
4173 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4174     uint32_t claimed)
4175 {
4176 	uchar_t			r_ctl;
4177 	uchar_t			ls_code;
4178 	struct fcp_port	*pptr;
4179 
4180 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4181 		return (FC_UNCLAIMED);
4182 	}
4183 
4184 	mutex_enter(&pptr->port_mutex);
4185 	if (pptr->port_state & (FCP_STATE_DETACHING |
4186 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4187 		mutex_exit(&pptr->port_mutex);
4188 		return (FC_UNCLAIMED);
4189 	}
4190 	mutex_exit(&pptr->port_mutex);
4191 
4192 	r_ctl = buf->ub_frame.r_ctl;
4193 
4194 	switch (r_ctl & R_CTL_ROUTING) {
4195 	case R_CTL_EXTENDED_SVC:
4196 		if (r_ctl == R_CTL_ELS_REQ) {
4197 			ls_code = buf->ub_buffer[0];
4198 
4199 			switch (ls_code) {
4200 			case LA_ELS_PRLI:
4201 				/*
4202 				 * We really don't care if something fails.
4203 				 * If the PRLI was not sent out, then the
4204 				 * other end will time it out.
4205 				 */
4206 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4207 					return (FC_SUCCESS);
4208 				}
4209 				return (FC_UNCLAIMED);
4210 				/* NOTREACHED */
4211 
4212 			default:
4213 				break;
4214 			}
4215 		}
4216 		/* FALLTHROUGH */
4217 
4218 	default:
4219 		return (FC_UNCLAIMED);
4220 	}
4221 }
4222 
4223 
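/*
 * Unsolicited data callback (see fcp_els_callback() above for the ELS
 * flavor).  FCP does not handle unsolicited data frames, so the buffer is
 * always left unclaimed.
 */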
4224 /*ARGSUSED*/
4225 static int
4226 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4227     uint32_t claimed)
4228 {
4229 	return (FC_UNCLAIMED);
4230 }
4231 
4232 /*
4233  *     Function: fcp_statec_callback
4234  *
4235  *  Description: The purpose of this function is to handle a port state change.
4236  *		 It is called from fp/fctl and, in a few instances, internally.
4237  *
4238  *     Argument: ulph		fp/fctl port handle
4239  *		 port_handle	fcp_port structure
4240  *		 port_state	Physical state of the port
4241  *		 port_top	Topology
4242  *		 *devlist	Pointer to the first entry of a table
4243  *				containing the remote ports that can be
4244  *				reached.
4245  *		 dev_cnt	Number of entries pointed by devlist.
4246  *		 port_sid	Port ID of the local port.
4247  *
4248  * Return Value: None
4249  */
4250 /*ARGSUSED*/
4251 static void
4252 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4253     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4254     uint32_t dev_cnt, uint32_t port_sid)
4255 {
4256 	uint32_t		link_count;
4257 	int			map_len = 0;
4258 	struct fcp_port	*pptr;
4259 	fcp_map_tag_t		*map_tag = NULL;
4260 
4261 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4262 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4263 		return;			/* nothing to work with! */
4264 	}
4265 
4266 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4267 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4268 	    "fcp_statec_callback: port state/dev_cnt/top ="
4269 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4270 	    dev_cnt, port_top);
4271 
4272 	mutex_enter(&pptr->port_mutex);
4273 
4274 	/*
4275 	 * If a thread is in detach, don't do anything.
4276 	 */
4277 	if (pptr->port_state & (FCP_STATE_DETACHING |
4278 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4279 		mutex_exit(&pptr->port_mutex);
4280 		return;
4281 	}
4282 
4283 	/*
4284 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4285 	 * init_pkt is called, it knows whether or not the target's status
4286 	 * (or pd) might be changing.
4287 	 */
4288 
4289 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4290 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4291 	}
4292 
4293 	/*
4294 	 * the transport doesn't allocate or probe unless being
4295 	 * asked to by either the applications or ULPs
4296 	 *
4297 	 * in cases where the port is OFFLINE at the time of port
4298 	 * attach callback and the link comes ONLINE later, for
4299 	 * easier automatic node creation (i.e. without you having to
4300 	 * go out and run the utility to perform LOGINs) the
4301 	 * following conditional is helpful
4302 	 */
4303 	pptr->port_phys_state = port_state;
4304 
4305 	if (dev_cnt) {
4306 		mutex_exit(&pptr->port_mutex);
4307 
4308 		map_len = sizeof (*map_tag) * dev_cnt;
4309 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4310 		if (map_tag == NULL) {
4311 			fcp_log(CE_WARN, pptr->port_dip,
4312 			    "!fcp%d: failed to allocate for map tags; "
4313 			    " state change will not be processed",
4314 			    pptr->port_instance);
4315 
4316 			mutex_enter(&pptr->port_mutex);
4317 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4318 			mutex_exit(&pptr->port_mutex);
4319 
4320 			return;
4321 		}
4322 
4323 		mutex_enter(&pptr->port_mutex);
4324 	}
4325 
4326 	if (pptr->port_id != port_sid) {
4327 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4328 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4329 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4330 		    port_sid);
4331 		/*
4332 		 * The local port changed ID. It is the first time a port ID
4333 		 * is assigned or something drastic happened.  We might have
4334 		 * been unplugged and replugged on another loop or fabric port
4335 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4336 		 * the fabric we were plugged into.
4337 		 */
4338 		pptr->port_id = port_sid;
4339 	}
4340 
4341 	switch (FC_PORT_STATE_MASK(port_state)) {
4342 	case FC_STATE_OFFLINE:
4343 	case FC_STATE_RESET_REQUESTED:
4344 		/*
4345 		 * link has gone from online to offline -- just update the
4346 		 * state of this port to BUSY and MARKed to go offline
4347 		 */
4348 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4349 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4350 		    "link went offline");
4351 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4352 			/*
4353 			 * We were offline a while ago and this one
4354 			 * seems to indicate that the loop has gone
4355 			 * dead forever.
4356 			 */
4357 			pptr->port_tmp_cnt += dev_cnt;
4358 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4359 			pptr->port_state |= FCP_STATE_INIT;
4360 			link_count = pptr->port_link_cnt;
4361 			fcp_handle_devices(pptr, devlist, dev_cnt,
4362 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4363 		} else {
4364 			pptr->port_link_cnt++;
4365 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4366 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4367 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4368 			if (pptr->port_mpxio) {
4369 				fcp_update_mpxio_path_verifybusy(pptr);
4370 			}
4371 			pptr->port_state |= FCP_STATE_OFFLINE;
4372 			pptr->port_state &=
4373 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4374 			pptr->port_tmp_cnt = 0;
4375 		}
4376 		mutex_exit(&pptr->port_mutex);
4377 		break;
4378 
4379 	case FC_STATE_ONLINE:
4380 	case FC_STATE_LIP:
4381 	case FC_STATE_LIP_LBIT_SET:
4382 		/*
4383 		 * link has gone from offline to online
4384 		 */
4385 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4386 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4387 		    "link went online");
4388 
4389 		pptr->port_link_cnt++;
4390 
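		/*
		 * Let any internal packets still outstanding from the
		 * previous link state drain before continuing.
		 */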
4391 		while (pptr->port_ipkt_cnt) {
4392 			mutex_exit(&pptr->port_mutex);
4393 			delay(drv_usectohz(1000000));
4394 			mutex_enter(&pptr->port_mutex);
4395 		}
4396 
4397 		pptr->port_topology = port_top;
4398 
4399 		/*
4400 		 * The state of the targets and luns accessible through this
4401 		 * port is updated.
4402 		 */
4403 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4404 		    FCP_CAUSE_LINK_CHANGE);
4405 
4406 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4407 		pptr->port_state |= FCP_STATE_ONLINING;
4408 		pptr->port_tmp_cnt = dev_cnt;
4409 		link_count = pptr->port_link_cnt;
4410 
4411 		pptr->port_deadline = fcp_watchdog_time +
4412 		    FCP_ICMD_DEADLINE;
4413 
4414 		if (!dev_cnt) {
4415 			/*
4416 			 * We go directly to the online state if no remote
4417 			 * ports were discovered.
4418 			 */
4419 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4420 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4421 			    "No remote ports discovered");
4422 
4423 			pptr->port_state &= ~FCP_STATE_ONLINING;
4424 			pptr->port_state |= FCP_STATE_ONLINE;
4425 		}
4426 
4427 		switch (port_top) {
4428 		case FC_TOP_FABRIC:
4429 		case FC_TOP_PUBLIC_LOOP:
4430 		case FC_TOP_PRIVATE_LOOP:
4431 		case FC_TOP_PT_PT:
4432 
4433 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4434 				fcp_retry_ns_registry(pptr, port_sid);
4435 			}
4436 
4437 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4438 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4439 			break;
4440 
4441 		default:
4442 			/*
4443 			 * We got here because we were provided with an unknown
4444 			 * topology.
4445 			 */
4446 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4447 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4448 			}
4449 
4450 			pptr->port_tmp_cnt -= dev_cnt;
4451 			fcp_log(CE_WARN, pptr->port_dip,
4452 			    "!unknown/unsupported topology (0x%x)", port_top);
4453 			break;
4454 		}
4455 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4456 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4457 		    "Notify ssd of the reset to reinstate the reservations");
4458 
4459 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4460 		    &pptr->port_reset_notify_listf);
4461 
4462 		mutex_exit(&pptr->port_mutex);
4463 
4464 		break;
4465 
4466 	case FC_STATE_RESET:
4467 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4468 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4469 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4470 		    "RESET state, waiting for Offline/Online state_cb");
4471 		mutex_exit(&pptr->port_mutex);
4472 		break;
4473 
4474 	case FC_STATE_DEVICE_CHANGE:
4475 		/*
4476 		 * We come here when an application has requested
4477 		 * Dynamic node creation/deletion in Fabric connectivity.
4478 		 */
4479 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4480 		    FCP_STATE_INIT)) {
4481 			/*
4482 			 * This case can happen when the FCTL is in the
			 * process of giving us an ONLINE and the host on
4484 			 * the other side issues a PLOGI/PLOGO. Ideally
4485 			 * the state changes should be serialized unless
4486 			 * they are opposite (online-offline).
4487 			 * The transport will give us a final state change
4488 			 * so we can ignore this for the time being.
4489 			 */
4490 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4491 			mutex_exit(&pptr->port_mutex);
4492 			break;
4493 		}
4494 
4495 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4496 			fcp_retry_ns_registry(pptr, port_sid);
4497 		}
4498 
4499 		/*
4500 		 * Extend the deadline under steady state conditions
4501 		 * to provide more time for the device-change-commands
4502 		 */
4503 		if (!pptr->port_ipkt_cnt) {
4504 			pptr->port_deadline = fcp_watchdog_time +
4505 			    FCP_ICMD_DEADLINE;
4506 		}
4507 
4508 		/*
4509 		 * There is another race condition here, where if we were
		 * in ONLINING state and a device in the map logs out,
4511 		 * fp will give another state change as DEVICE_CHANGE
4512 		 * and OLD. This will result in that target being offlined.
4513 		 * The pd_handle is freed. If from the first statec callback
4514 		 * we were going to fire a PLOGI/PRLI, the system will
		 * panic in fc_ulp_transport with an invalid pd_handle.
4516 		 * The fix is to check for the link_cnt before issuing
4517 		 * any command down.
4518 		 */
4519 		fcp_update_targets(pptr, devlist, dev_cnt,
4520 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4521 
4522 		link_count = pptr->port_link_cnt;
4523 
4524 		fcp_handle_devices(pptr, devlist, dev_cnt,
4525 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4526 
4527 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4528 
4529 		mutex_exit(&pptr->port_mutex);
4530 		break;
4531 
4532 	case FC_STATE_TARGET_PORT_RESET:
4533 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4534 			fcp_retry_ns_registry(pptr, port_sid);
4535 		}
4536 
4537 		/* Do nothing else */
4538 		mutex_exit(&pptr->port_mutex);
4539 		break;
4540 
4541 	default:
4542 		fcp_log(CE_WARN, pptr->port_dip,
4543 		    "!Invalid state change=0x%x", port_state);
4544 		mutex_exit(&pptr->port_mutex);
4545 		break;
4546 	}
4547 
4548 	if (map_tag) {
4549 		kmem_free(map_tag, map_len);
4550 	}
4551 }
4552 
4553 /*
4554  *     Function: fcp_handle_devices
4555  *
4556  *  Description: This function updates the devices currently known by
4557  *		 walking the list provided by the caller.  The list passed
4558  *		 by the caller is supposed to be the list of reachable
4559  *		 devices.
4560  *
4561  *     Argument: *pptr		Fcp port structure.
4562  *		 *devlist	Pointer to the first entry of a table
4563  *				containing the remote ports that can be
4564  *				reached.
4565  *		 dev_cnt	Number of entries pointed by devlist.
4566  *		 link_cnt	Link state count.
4567  *		 *map_tag	Array of fcp_map_tag_t structures.
4568  *		 cause		What caused this function to be called.
4569  *
4570  * Return Value: None
4571  *
4572  *	  Notes: The pptr->port_mutex must be held.
4573  */
4574 static void
4575 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4576     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4577 {
4578 	int			i;
4579 	int			check_finish_init = 0;
4580 	fc_portmap_t		*map_entry;
4581 	struct fcp_tgt	*ptgt = NULL;
4582 
4583 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4584 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4585 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4586 
4587 	if (dev_cnt) {
4588 		ASSERT(map_tag != NULL);
4589 	}
4590 
4591 	/*
4592 	 * The following code goes through the list of remote ports that are
4593 	 * accessible through this (pptr) local port (The list walked is the
4594 	 * one provided by the caller which is the list of the remote ports
4595 	 * currently reachable).  It checks if any of them was already
4596 	 * known by looking for the corresponding target structure based on
4597 	 * the world wide name.	 If a target is part of the list it is tagged
4598 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4599 	 *
4600 	 * Old comment
4601 	 * -----------
	 * Before we drop the port mutex we MUST get the tags updated; this
	 * two-step process is somewhat slow, but more reliable.
4604 	 */
4605 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4606 		map_entry = &(devlist[i]);
4607 
4608 		/*
4609 		 * get ptr to this map entry in our port's
4610 		 * list (if any)
4611 		 */
4612 		ptgt = fcp_lookup_target(pptr,
4613 		    (uchar_t *)&(map_entry->map_pwwn));
4614 
4615 		if (ptgt) {
4616 			map_tag[i] = ptgt->tgt_change_cnt;
4617 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4618 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4619 			}
4620 		}
4621 	}
4622 
4623 	/*
4624 	 * At this point we know which devices of the new list were already
4625 	 * known (The field tgt_aux_state of the target structure has been
4626 	 * set to FCP_TGT_TAGGED).
4627 	 *
4628 	 * The following code goes through the list of targets currently known
4629 	 * by the local port (the list is actually a hashing table).  If a
4630 	 * target is found and is not tagged, it means the target cannot
4631 	 * be reached anymore through the local port (pptr).  It is offlined.
4632 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4633 	 */
4634 	for (i = 0; i < FCP_NUM_HASH; i++) {
4635 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4636 		    ptgt = ptgt->tgt_next) {
4637 			mutex_enter(&ptgt->tgt_mutex);
4638 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4639 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4640 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4641 				fcp_offline_target_now(pptr, ptgt,
4642 				    link_cnt, ptgt->tgt_change_cnt, 0);
4643 			}
4644 			mutex_exit(&ptgt->tgt_mutex);
4645 		}
4646 	}
4647 
4648 	/*
4649 	 * At this point, the devices that were known but cannot be reached
4650 	 * anymore, have most likely been offlined.
4651 	 *
4652 	 * The following section of code seems to go through the list of
4653 	 * remote ports that can now be reached.  For every single one it
4654 	 * checks if it is already known or if it is a new port.
4655 	 */
4656 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4657 
4658 		if (check_finish_init) {
4659 			ASSERT(i > 0);
4660 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4661 			    map_tag[i - 1], cause);
4662 			check_finish_init = 0;
4663 		}
4664 
4665 		/* get a pointer to this map entry */
4666 		map_entry = &(devlist[i]);
4667 
4668 		/*
4669 		 * Check for the duplicate map entry flag. If we have marked
4670 		 * this entry as a duplicate we skip it since the correct
4671 		 * (perhaps even same) state change will be encountered
4672 		 * later in the list.
4673 		 */
4674 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4675 			continue;
4676 		}
4677 
4678 		/* get ptr to this map entry in our port's list (if any) */
4679 		ptgt = fcp_lookup_target(pptr,
4680 		    (uchar_t *)&(map_entry->map_pwwn));
4681 
4682 		if (ptgt) {
4683 			/*
4684 			 * This device was already known.  The field
4685 			 * tgt_aux_state is reset (was probably set to
4686 			 * FCP_TGT_TAGGED previously in this routine).
4687 			 */
4688 			ptgt->tgt_aux_state = 0;
4689 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4690 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4691 			    "handle_devices: map did/state/type/flags = "
4692 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4693 			    "tgt_state=%d",
4694 			    map_entry->map_did.port_id, map_entry->map_state,
4695 			    map_entry->map_type, map_entry->map_flags,
4696 			    ptgt->tgt_d_id, ptgt->tgt_state);
4697 		}
4698 
4699 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4700 		    map_entry->map_type == PORT_DEVICE_NEW ||
4701 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4702 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4703 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4704 			    "map_type=%x, did = %x",
4705 			    map_entry->map_type,
4706 			    map_entry->map_did.port_id);
4707 		}
4708 
4709 		switch (map_entry->map_type) {
4710 		case PORT_DEVICE_NOCHANGE:
4711 		case PORT_DEVICE_USER_CREATE:
4712 		case PORT_DEVICE_USER_LOGIN:
4713 		case PORT_DEVICE_NEW:
4714 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4715 
4716 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4717 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4718 			    cause) == TRUE) {
4719 
4720 				FCP_TGT_TRACE(ptgt, map_tag[i],
4721 				    FCP_TGT_TRACE_2);
4722 				check_finish_init++;
4723 			}
4724 			break;
4725 
4726 		case PORT_DEVICE_OLD:
4727 			if (ptgt != NULL) {
4728 				FCP_TGT_TRACE(ptgt, map_tag[i],
4729 				    FCP_TGT_TRACE_3);
4730 
4731 				mutex_enter(&ptgt->tgt_mutex);
4732 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4733 					/*
4734 					 * Must do an in-line wait for I/Os
4735 					 * to get drained
4736 					 */
4737 					mutex_exit(&ptgt->tgt_mutex);
4738 					mutex_exit(&pptr->port_mutex);
4739 
4740 					mutex_enter(&ptgt->tgt_mutex);
4741 					while (ptgt->tgt_ipkt_cnt ||
4742 					    fcp_outstanding_lun_cmds(ptgt)
4743 					    == FC_SUCCESS) {
4744 						mutex_exit(&ptgt->tgt_mutex);
4745 						delay(drv_usectohz(1000000));
4746 						mutex_enter(&ptgt->tgt_mutex);
4747 					}
4748 					mutex_exit(&ptgt->tgt_mutex);
4749 
4750 					mutex_enter(&pptr->port_mutex);
4751 					mutex_enter(&ptgt->tgt_mutex);
4752 
4753 					(void) fcp_offline_target(pptr, ptgt,
4754 					    link_cnt, map_tag[i], 0, 0);
4755 				}
4756 				mutex_exit(&ptgt->tgt_mutex);
4757 			}
4758 			check_finish_init++;
4759 			break;
4760 
4761 		case PORT_DEVICE_USER_DELETE:
4762 		case PORT_DEVICE_USER_LOGOUT:
4763 			if (ptgt != NULL) {
4764 				FCP_TGT_TRACE(ptgt, map_tag[i],
4765 				    FCP_TGT_TRACE_4);
4766 
4767 				mutex_enter(&ptgt->tgt_mutex);
4768 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4769 					(void) fcp_offline_target(pptr, ptgt,
4770 					    link_cnt, map_tag[i], 1, 0);
4771 				}
4772 				mutex_exit(&ptgt->tgt_mutex);
4773 			}
4774 			check_finish_init++;
4775 			break;
4776 
4777 		case PORT_DEVICE_CHANGED:
4778 			if (ptgt != NULL) {
4779 				FCP_TGT_TRACE(ptgt, map_tag[i],
4780 				    FCP_TGT_TRACE_5);
4781 
4782 				if (fcp_device_changed(pptr, ptgt,
4783 				    map_entry, link_cnt, map_tag[i],
4784 				    cause) == TRUE) {
4785 					check_finish_init++;
4786 				}
4787 			} else {
4788 				if (fcp_handle_mapflags(pptr, ptgt,
4789 				    map_entry, link_cnt, 0, cause) == TRUE) {
4790 					check_finish_init++;
4791 				}
4792 			}
4793 			break;
4794 
4795 		default:
4796 			fcp_log(CE_WARN, pptr->port_dip,
4797 			    "!Invalid map_type=0x%x", map_entry->map_type);
4798 			check_finish_init++;
4799 			break;
4800 		}
4801 	}
4802 
4803 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4804 		ASSERT(i > 0);
4805 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4806 		    map_tag[i-1], cause);
4807 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4808 		fcp_offline_all(pptr, link_cnt, cause);
4809 	}
4810 }
4811 
4812 /*
4813  *     Function: fcp_handle_mapflags
4814  *
4815  *  Description: This function creates a target structure if the ptgt passed
4816  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4817  *		 into the target yet or the PRLI if we are logged into the
4818  *		 target already.  The rest of the treatment is done in the
4819  *		 callbacks of the PLOGI or PRLI.
4820  *
4821  *     Argument: *pptr		FCP Port structure.
4822  *		 *ptgt		Target structure.
4823  *		 *map_entry	Array of fc_portmap_t structures.
4824  *		 link_cnt	Link state count.
4825  *		 tgt_cnt	Target state count.
4826  *		 cause		What caused this function to be called.
4827  *
4828  * Return Value: TRUE	Failed
4829  *		 FALSE	Succeeded
4830  *
4831  *	  Notes: pptr->port_mutex must be owned.
4832  */
4833 static int
4834 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4835     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4836 {
4837 	int			lcount;
4838 	int			tcount;
4839 	int			ret = TRUE;
4840 	int			alloc;
4841 	struct fcp_ipkt	*icmd;
4842 	struct fcp_lun	*pseq_lun = NULL;
4843 	uchar_t			opcode;
4844 	int			valid_ptgt_was_passed = FALSE;
4845 
4846 	ASSERT(mutex_owned(&pptr->port_mutex));
4847 
4848 	/*
	 * This case is possible when the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case the ULP would get
	 * PORT_DEVICE_NOCHANGE but the target would be NULL.
4853 	 */
4854 	if (ptgt == NULL) {
4855 		/* don't already have a target */
4856 		mutex_exit(&pptr->port_mutex);
4857 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4858 		mutex_enter(&pptr->port_mutex);
4859 
4860 		if (ptgt == NULL) {
4861 			fcp_log(CE_WARN, pptr->port_dip,
4862 			    "!FC target allocation failed");
4863 			return (ret);
4864 		}
4865 		mutex_enter(&ptgt->tgt_mutex);
4866 		ptgt->tgt_statec_cause = cause;
4867 		ptgt->tgt_tmp_cnt = 1;
4868 		mutex_exit(&ptgt->tgt_mutex);
4869 	} else {
4870 		valid_ptgt_was_passed = TRUE;
4871 	}
4872 
4873 	/*
4874 	 * Copy in the target parameters
4875 	 */
4876 	mutex_enter(&ptgt->tgt_mutex);
4877 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4878 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4879 	ptgt->tgt_pd_handle = map_entry->map_pd;
4880 	ptgt->tgt_fca_dev = NULL;
4881 
4882 	/* Copy port and node WWNs */
4883 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4884 	    FC_WWN_SIZE);
4885 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4886 	    FC_WWN_SIZE);
4887 
4888 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4889 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4890 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4891 	    valid_ptgt_was_passed) {
4892 		/*
4893 		 * determine if there are any tape LUNs on this target
4894 		 */
4895 		for (pseq_lun = ptgt->tgt_lun;
4896 		    pseq_lun != NULL;
4897 		    pseq_lun = pseq_lun->lun_next) {
4898 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4899 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4900 				fcp_update_tgt_state(ptgt, FCP_RESET,
4901 				    FCP_LUN_MARK);
4902 				mutex_exit(&ptgt->tgt_mutex);
4903 				return (ret);
4904 			}
4905 		}
4906 	}
4907 
4908 	/*
4909 	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but was zeroed out, which means
4911 	 * FCP_TGT_NODE_NONE.
4912 	 */
4913 	switch (ptgt->tgt_node_state) {
4914 	case FCP_TGT_NODE_NONE:
4915 	case FCP_TGT_NODE_ON_DEMAND:
4916 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4917 		    !fcp_enable_auto_configuration &&
4918 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4919 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4920 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4921 		    fcp_enable_auto_configuration &&
4922 		    (ptgt->tgt_manual_config_only == 1) &&
4923 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4924 			/*
4925 			 * If auto configuration is set and
4926 			 * the tgt_manual_config_only flag is set then
4927 			 * we only want the user to be able to change
4928 			 * the state through create_on_demand.
4929 			 */
4930 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4931 		} else {
4932 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4933 		}
4934 		break;
4935 
4936 	case FCP_TGT_NODE_PRESENT:
4937 		break;
4938 	}
4939 	/*
4940 	 * If we are booting from a fabric device, make sure we
4941 	 * mark the node state appropriately for this target to be
4942 	 * enumerated
4943 	 */
4944 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
4945 		if (bcmp((caddr_t)pptr->port_boot_wwn,
4946 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
4947 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
4948 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4949 		}
4950 	}
4951 	mutex_exit(&ptgt->tgt_mutex);
4952 
4953 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4954 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4955 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
4956 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
4957 	    map_entry->map_rscn_info.ulp_rscn_count);
4958 
4959 	mutex_enter(&ptgt->tgt_mutex);
4960 
4961 	/*
4962 	 * Reset target OFFLINE state and mark the target BUSY
4963 	 */
4964 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
4965 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
4966 
4967 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
4968 	lcount = link_cnt;
4969 
4970 	mutex_exit(&ptgt->tgt_mutex);
4971 	mutex_exit(&pptr->port_mutex);
4972 
4973 	/*
4974 	 * if we are already logged in, then we do a PRLI, else
4975 	 * we do a PLOGI first (to get logged in)
4976 	 *
4977 	 * We will not check if we are the PLOGI initiator
4978 	 */
4979 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
4980 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
4981 
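	/*
	 * Size the internal packet for the larger of the two ELS payloads so
	 * that the same allocation works for either a PLOGI or a PRLI.
	 */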
4982 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
4983 
4984 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount,
4985 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
4986 
4987 	if (icmd == NULL) {
4988 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
4989 		/*
4990 		 * We've exited port_mutex before calling fcp_icmd_alloc,
4991 		 * we need to make sure we reacquire it before returning.
4992 		 */
4993 		mutex_enter(&pptr->port_mutex);
4994 		return (FALSE);
4995 	}
4996 
	/* TRUE is only returned when the target is intentionally skipped */
4998 	ret = FALSE;
4999 	/* discover info about this target */
5000 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5001 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5002 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5003 	} else {
5004 		fcp_icmd_free(pptr, icmd);
5005 		ret = TRUE;
5006 	}
5007 	mutex_enter(&pptr->port_mutex);
5008 
5009 	return (ret);
5010 }
5011 
5012 /*
5013  *     Function: fcp_send_els
5014  *
5015  *  Description: Sends an ELS to the target specified by the caller.  Supports
5016  *		 PLOGI and PRLI.
5017  *
5018  *     Argument: *pptr		Fcp port.
5019  *		 *ptgt		Target to send the ELS to.
5020  *		 *icmd		Internal packet
5021  *		 opcode		ELS opcode
5022  *		 lcount		Link state change counter
5023  *		 tcount		Target state change counter
5024  *		 cause		What caused the call
5025  *
5026  * Return Value: DDI_SUCCESS
5027  *		 Others
5028  */
5029 static int
5030 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5031     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5032 {
5033 	fc_packet_t		*fpkt;
5034 	fc_frame_hdr_t		*hp;
5035 	int			internal = 0;
5036 	int			alloc;
5037 	int			cmd_len;
5038 	int			resp_len;
5039 	int			res = DDI_FAILURE; /* default result */
5040 	int			rval = DDI_FAILURE;
5041 
5042 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5043 	ASSERT(ptgt->tgt_port == pptr);
5044 
5045 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5046 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5047 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5048 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5049 
5050 	if (opcode == LA_ELS_PLOGI) {
5051 		cmd_len = sizeof (la_els_logi_t);
5052 		resp_len = sizeof (la_els_logi_t);
5053 	} else {
5054 		ASSERT(opcode == LA_ELS_PRLI);
5055 		cmd_len = sizeof (la_els_prli_t);
5056 		resp_len = sizeof (la_els_prli_t);
5057 	}
5058 
5059 	if (icmd == NULL) {
5060 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5061 		    sizeof (la_els_prli_t));
5062 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0,
5063 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5064 		if (icmd == NULL) {
5065 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5066 			return (res);
5067 		}
5068 		internal++;
5069 	}
5070 	fpkt = icmd->ipkt_fpkt;
5071 
5072 	fpkt->pkt_cmdlen = cmd_len;
5073 	fpkt->pkt_rsplen = resp_len;
5074 	fpkt->pkt_datalen = 0;
5075 	icmd->ipkt_retries = 0;
5076 
5077 	/* fill in fpkt info */
5078 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5079 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5080 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5081 
5082 	/* get ptr to frame hdr in fpkt */
5083 	hp = &fpkt->pkt_cmd_fhdr;
5084 
5085 	/*
5086 	 * fill in frame hdr
5087 	 */
5088 	hp->r_ctl = R_CTL_ELS_REQ;
5089 	hp->s_id = pptr->port_id;	/* source ID */
5090 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5091 	hp->type = FC_TYPE_EXTENDED_LS;
5092 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5093 	hp->seq_id = 0;
5094 	hp->rsvd = 0;
5095 	hp->df_ctl  = 0;
5096 	hp->seq_cnt = 0;
5097 	hp->ox_id = 0xffff;		/* i.e. none */
5098 	hp->rx_id = 0xffff;		/* i.e. none */
5099 	hp->ro = 0;
5100 
5101 	/*
5102 	 * at this point we have a filled in cmd pkt
5103 	 *
5104 	 * fill in the respective info, then use the transport to send
5105 	 * the packet
5106 	 *
5107 	 * for a PLOGI call fc_ulp_login(), and
5108 	 * for a PRLI call fc_ulp_issue_els()
5109 	 */
5110 	switch (opcode) {
5111 	case LA_ELS_PLOGI: {
5112 		struct la_els_logi logi;
5113 
5114 		bzero(&logi, sizeof (struct la_els_logi));
5115 
5116 		hp = &fpkt->pkt_cmd_fhdr;
5117 		hp->r_ctl = R_CTL_ELS_REQ;
5118 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5119 		logi.ls_code.mbz = 0;
5120 
5121 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5122 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5123 
5124 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5125 
5126 		mutex_enter(&pptr->port_mutex);
5127 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5128 
5129 			mutex_exit(&pptr->port_mutex);
5130 
5131 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5132 			if (rval == FC_SUCCESS) {
5133 				res = DDI_SUCCESS;
5134 				break;
5135 			}
5136 
5137 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5138 
5139 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5140 			    rval, "PLOGI");
5141 		} else {
5142 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5143 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occurred"
5145 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5146 			mutex_exit(&pptr->port_mutex);
5147 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5148 		}
5149 		break;
5150 	}
5151 
5152 	case LA_ELS_PRLI: {
5153 		struct la_els_prli	prli;
5154 		struct fcp_prli		*fprli;
5155 
5156 		bzero(&prli, sizeof (struct la_els_prli));
5157 
5158 		hp = &fpkt->pkt_cmd_fhdr;
5159 		hp->r_ctl = R_CTL_ELS_REQ;
5160 
5161 		/* fill in PRLI cmd ELS fields */
5162 		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* PRLI service param page is 16 bytes */
5164 		prli.payload_length = sizeof (struct la_els_prli);
5165 
5166 		icmd->ipkt_opcode = LA_ELS_PRLI;
5167 
5168 		/* get ptr to PRLI service params */
5169 		fprli = (struct fcp_prli *)prli.service_params;
5170 
5171 		/* fill in service params */
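		/* 0x08 is the FC-4 type code for SCSI FCP */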
5172 		fprli->type = 0x08;
5173 		fprli->resvd1 = 0;
5174 		fprli->orig_process_assoc_valid = 0;
5175 		fprli->resp_process_assoc_valid = 0;
5176 		fprli->establish_image_pair = 1;
5177 		fprli->resvd2 = 0;
5178 		fprli->resvd3 = 0;
5179 		fprli->obsolete_1 = 0;
5180 		fprli->obsolete_2 = 0;
5181 		fprli->data_overlay_allowed = 0;
5182 		fprli->initiator_fn = 1;
5183 		fprli->confirmed_compl_allowed = 1;
5184 
5185 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5186 			fprli->target_fn = 1;
5187 		} else {
5188 			fprli->target_fn = 0;
5189 		}
5190 
5191 		fprli->retry = 1;
5192 		fprli->read_xfer_rdy_disabled = 1;
5193 		fprli->write_xfer_rdy_disabled = 0;
5194 
5195 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5196 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5197 
5198 		/* issue the PRLI request */
5199 
5200 		mutex_enter(&pptr->port_mutex);
5201 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5202 
5203 			mutex_exit(&pptr->port_mutex);
5204 
5205 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5206 			if (rval == FC_SUCCESS) {
5207 				res = DDI_SUCCESS;
5208 				break;
5209 			}
5210 
5211 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5212 
5213 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5214 			    rval, "PRLI");
5215 		} else {
5216 			mutex_exit(&pptr->port_mutex);
5217 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5218 		}
5219 		break;
5220 	}
5221 
5222 	default:
5223 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5224 		break;
5225 	}
5226 
5227 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5228 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5229 	    "fcp_send_els: returning %d", res);
5230 
5231 	if (res != DDI_SUCCESS) {
5232 		if (internal) {
5233 			fcp_icmd_free(pptr, icmd);
5234 		}
5235 	}
5236 
5237 	return (res);
5238 }
5239 
5240 
5241 /*
 * called internally to update the state of all the targets and each LUN
 * of this port (i.e. each target known to be attached to this port)
 * if they are not already offline
5245  *
5246  * must be called with the port mutex owned
5247  *
5248  * acquires and releases the target mutexes for each target attached
5249  * to this port
5250  */
5251 void
5252 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5253 {
5254 	int i;
5255 	struct fcp_tgt *ptgt;
5256 
5257 	ASSERT(mutex_owned(&pptr->port_mutex));
5258 
5259 	for (i = 0; i < FCP_NUM_HASH; i++) {
5260 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5261 		    ptgt = ptgt->tgt_next) {
5262 			mutex_enter(&ptgt->tgt_mutex);
5263 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5264 			ptgt->tgt_change_cnt++;
5265 			ptgt->tgt_statec_cause = cause;
5266 			ptgt->tgt_tmp_cnt = 1;
5267 			ptgt->tgt_done = 0;
5268 			mutex_exit(&ptgt->tgt_mutex);
5269 		}
5270 	}
5271 }
5272 
5273 
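/*
 * Called with pptr->port_mutex held when a link state change leaves no
 * reachable remote ports.  Walks the target hash table and calls
 * fcp_call_finish_init_held() for every known target so that the offline
 * processing for the whole port can complete.
 */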
5274 static void
5275 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5276 {
5277 	int i;
5278 	int ndevs;
5279 	struct fcp_tgt *ptgt;
5280 
5281 	ASSERT(mutex_owned(&pptr->port_mutex));
5282 
5283 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5284 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5285 		    ptgt = ptgt->tgt_next) {
5286 			ndevs++;
5287 		}
5288 	}
5289 
5290 	if (ndevs == 0) {
5291 		return;
5292 	}
5293 	pptr->port_tmp_cnt = ndevs;
5294 
5295 	for (i = 0; i < FCP_NUM_HASH; i++) {
5296 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5297 		    ptgt = ptgt->tgt_next) {
5298 			(void) fcp_call_finish_init_held(pptr, ptgt,
5299 			    lcount, ptgt->tgt_change_cnt, cause);
5300 		}
5301 	}
5302 }
5303 
5304 /*
5305  *     Function: fcp_update_tgt_state
5306  *
 *  Description: This function updates the tgt_state field of a target.  That
 *		 field is a bitmap in which each bit can be set or reset
 *		 individually.  The action applied to the target state is also
 *		 applied to all the LUNs belonging to the target (provided the
 *		 LUN is not offline).  A side effect of applying the state
 *		 modification is that the tgt_trace field of the target and the
 *		 lun_trace field of the LUNs are set to zero.
5314  *
5315  *
5316  *     Argument: *ptgt	Target structure.
5317  *		 flag	Flag indication what action to apply (set/reset).
5318  *		 state	State bits to update.
5319  *
5320  * Return Value: None
5321  *
5322  *	Context: Interrupt, Kernel or User context.
5323  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5324  *		 calling this function.
5325  */
5326 void
5327 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5328 {
5329 	struct fcp_lun *plun;
5330 
5331 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5332 
5333 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5334 		/* The target is not offline. */
5335 		if (flag == FCP_SET) {
5336 			ptgt->tgt_state |= state;
5337 			ptgt->tgt_trace = 0;
5338 		} else {
5339 			ptgt->tgt_state &= ~state;
5340 		}
5341 
5342 		for (plun = ptgt->tgt_lun; plun != NULL;
5343 		    plun = plun->lun_next) {
5344 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5345 				/* The LUN is not offline. */
5346 				if (flag == FCP_SET) {
5347 					plun->lun_state |= state;
5348 					plun->lun_trace = 0;
5349 				} else {
5350 					plun->lun_state &= ~state;
5351 				}
5352 			}
5353 		}
5354 	}
5355 }
5356 
5357 /*
 *     Function: fcp_update_lun_state
 *
 *  Description: This function updates the lun_state field of a LUN.  That
 *		 field is a bitmap in which each bit can be set or reset
 *		 individually.
5363  *
5364  *     Argument: *plun	LUN structure.
5365  *		 flag	Flag indication what action to apply (set/reset).
5366  *		 state	State bits to update.
5367  *
5368  * Return Value: None
5369  *
5370  *	Context: Interrupt, Kernel or User context.
5371  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5372  *		 calling this function.
5373  */
5374 void
5375 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5376 {
5377 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5378 
5379 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5380 
5381 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5382 		if (flag == FCP_SET) {
5383 			plun->lun_state |= state;
5384 		} else {
5385 			plun->lun_state &= ~state;
5386 		}
5387 	}
5388 }
5389 
5390 /*
5391  *     Function: fcp_get_port
5392  *
5393  *  Description: This function returns the fcp_port structure from the opaque
5394  *		 handle passed by the caller.  That opaque handle is the handle
5395  *		 used by fp/fctl to identify a particular local port.  That
5396  *		 handle has been stored in the corresponding fcp_port
5397  *		 structure.  This function is going to walk the global list of
5398  *		 fcp_port structures till one has a port_fp_handle that matches
5399  *		 the handle passed by the caller.  This function enters the
5400  *		 mutex fcp_global_mutex while walking the global list and then
5401  *		 releases it.
5402  *
5403  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5404  *				particular port.
5405  *
5406  * Return Value: NULL		Not found.
5407  *		 Not NULL	Pointer to the fcp_port structure.
5408  *
5409  *	Context: Interrupt, Kernel or User context.
5410  */
5411 static struct fcp_port *
5412 fcp_get_port(opaque_t port_handle)
5413 {
5414 	struct fcp_port *pptr;
5415 
5416 	ASSERT(port_handle != NULL);
5417 
5418 	mutex_enter(&fcp_global_mutex);
5419 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5420 		if (pptr->port_fp_handle == port_handle) {
5421 			break;
5422 		}
5423 	}
5424 	mutex_exit(&fcp_global_mutex);
5425 
5426 	return (pptr);
5427 }
5428 
5429 
5430 static void
5431 fcp_unsol_callback(fc_packet_t *fpkt)
5432 {
5433 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5434 	struct fcp_port *pptr = icmd->ipkt_port;
5435 
5436 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5437 		caddr_t state, reason, action, expln;
5438 
5439 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5440 		    &action, &expln);
5441 
		fcp_log(CE_WARN, pptr->port_dip,
		    "!couldn't post response to unsolicited request: "
		    "state=%s reason=%s ox_id=%x rx_id=%x",
		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
		    fpkt->pkt_cmd_fhdr.rx_id);
5447 	}
5448 	fcp_icmd_free(pptr, icmd);
5449 }
5450 
5451 
5452 /*
5453  * Perform general purpose preparation of a response to an unsolicited request
5454  */
5455 static void
5456 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5457     uchar_t r_ctl, uchar_t type)
5458 {
5459 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5460 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5461 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5462 	pkt->pkt_cmd_fhdr.type = type;
5463 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5464 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5465 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5466 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5467 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5468 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5469 	pkt->pkt_cmd_fhdr.ro = 0;
5470 	pkt->pkt_cmd_fhdr.rsvd = 0;
5471 	pkt->pkt_comp = fcp_unsol_callback;
5472 	pkt->pkt_pd = NULL;
5473 }
5474 
5475 
5476 /*ARGSUSED*/
5477 static int
5478 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5479 {
5480 	fc_packet_t		*fpkt;
5481 	struct la_els_prli	prli;
5482 	struct fcp_prli		*fprli;
5483 	struct fcp_ipkt	*icmd;
5484 	struct la_els_prli	*from;
5485 	struct fcp_prli		*orig;
5486 	struct fcp_tgt	*ptgt;
5487 	int			tcount = 0;
5488 	int			lcount;
5489 
5490 	from = (struct la_els_prli *)buf->ub_buffer;
5491 	orig = (struct fcp_prli *)from->service_params;
5492 
5493 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5494 	    NULL) {
5495 		mutex_enter(&ptgt->tgt_mutex);
5496 		tcount = ptgt->tgt_change_cnt;
5497 		mutex_exit(&ptgt->tgt_mutex);
5498 	}
5499 	mutex_enter(&pptr->port_mutex);
5500 	lcount = pptr->port_link_cnt;
5501 	mutex_exit(&pptr->port_mutex);
5502 
5503 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5504 	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
5505 	    FC_INVALID_RSCN_COUNT)) == NULL) {
5506 		return (FC_FAILURE);
5507 	}
5508 	fpkt = icmd->ipkt_fpkt;
5509 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5510 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5511 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5512 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5513 	fpkt->pkt_rsplen = 0;
5514 	fpkt->pkt_datalen = 0;
5515 
5516 	icmd->ipkt_opcode = LA_ELS_PRLI;
5517 
5518 	bzero(&prli, sizeof (struct la_els_prli));
5519 	fprli = (struct fcp_prli *)prli.service_params;
5520 	prli.ls_code = LA_ELS_ACC;
5521 	prli.page_length = 0x10;
5522 	prli.payload_length = sizeof (struct la_els_prli);
5523 
5524 	/* fill in service params */
5525 	fprli->type = 0x08;
5526 	fprli->resvd1 = 0;
5527 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5528 	fprli->orig_process_associator = orig->orig_process_associator;
5529 	fprli->resp_process_assoc_valid = 0;
5530 	fprli->establish_image_pair = 1;
5531 	fprli->resvd2 = 0;
5532 	fprli->resvd3 = 0;
5533 	fprli->obsolete_1 = 0;
5534 	fprli->obsolete_2 = 0;
5535 	fprli->data_overlay_allowed = 0;
5536 	fprli->initiator_fn = 1;
5537 	fprli->confirmed_compl_allowed = 1;
5538 
5539 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5540 		fprli->target_fn = 1;
5541 	} else {
5542 		fprli->target_fn = 0;
5543 	}
5544 
5545 	fprli->retry = 1;
5546 	fprli->read_xfer_rdy_disabled = 1;
5547 	fprli->write_xfer_rdy_disabled = 0;
5548 
5549 	/* save the unsol prli payload first */
5550 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5551 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5552 
5553 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5554 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5555 
5556 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5557 
5558 	mutex_enter(&pptr->port_mutex);
5559 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5560 		int rval;
5561 		mutex_exit(&pptr->port_mutex);
5562 
5563 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5564 		    FC_SUCCESS) {
5565 			if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
5566 				fcp_queue_ipkt(pptr, fpkt);
5567 				return (FC_SUCCESS);
5568 			}
5569 			/* Let it timeout */
5570 			fcp_icmd_free(pptr, icmd);
5571 			return (FC_FAILURE);
5572 		}
5573 	} else {
5574 		mutex_exit(&pptr->port_mutex);
5575 		fcp_icmd_free(pptr, icmd);
5576 		return (FC_FAILURE);
5577 	}
5578 
5579 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5580 
5581 	return (FC_SUCCESS);
5582 }
5583 
5584 /*
5585  *     Function: fcp_icmd_alloc
5586  *
5587  *  Description: This function allocated a fcp_ipkt structure.	The pkt_comp
5588  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5589  *		 modified by the caller (such as fcp_send_scsi).  The
5590  *		 structure is also tied to the state of the line and of the
5591  *		 target at a particular time.  That link is established by
5592  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5593  *		 and tcount which came respectively from pptr->link_cnt and
5594  *		 ptgt->tgt_change_cnt.
5595  *
5596  *     Argument: *pptr		Fcp port.
5597  *		 *ptgt		Target (destination of the command).
5598  *		 cmd_len	Length of the command.
5599  *		 resp_len	Length of the expected response.
5600  *		 data_len	Length of the data.
5601  *		 nodma		Indicates weither the command and response.
5602  *				will be transfer through DMA or not.
5603  *		 lcount		Link state change counter.
5604  *		 tcount		Target state change counter.
5605  *		 cause		Reason that lead to this call.
5606  *
5607  * Return Value: NULL		Failed.
5608  *		 Not NULL	Internal packet address.
5609  */
5610 static struct fcp_ipkt *
5611 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5612     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5613     uint32_t rscn_count)
5614 {
5615 	int			dma_setup = 0;
5616 	fc_packet_t		*fpkt;
5617 	struct fcp_ipkt	*icmd = NULL;
5618 
5619 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5620 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5621 	    KM_NOSLEEP);
5622 	if (icmd == NULL) {
5623 		fcp_log(CE_WARN, pptr->port_dip,
5624 		    "!internal packet allocation failed");
5625 		return (NULL);
5626 	}
5627 
5628 	/*
5629 	 * initialize the allocated packet
5630 	 */
5631 	icmd->ipkt_nodma = nodma;
5632 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5633 	icmd->ipkt_lun = NULL;
5634 
5635 	icmd->ipkt_link_cnt = lcount;
5636 	icmd->ipkt_change_cnt = tcount;
5637 	icmd->ipkt_cause = cause;
5638 
5639 	mutex_enter(&pptr->port_mutex);
5640 	icmd->ipkt_port = pptr;
5641 	mutex_exit(&pptr->port_mutex);
5642 
5643 	/* keep track of amt of data to be sent in pkt */
5644 	icmd->ipkt_cmdlen = cmd_len;
5645 	icmd->ipkt_resplen = resp_len;
5646 	icmd->ipkt_datalen = data_len;
5647 
5648 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5649 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5650 
5651 	/* set pkt's private ptr to point to cmd pkt */
5652 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5653 
5654 	/* set FCA private ptr to memory just beyond */
5655 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5656 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5657 	    pptr->port_dmacookie_sz);
5658 
5659 	/* get ptr to fpkt substruct and fill it in */
5660 	fpkt = icmd->ipkt_fpkt;
5661 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5662 	    sizeof (struct fcp_ipkt));
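
	/*
	 * Resulting layout of the single buffer allocated above (sketch):
	 *
	 *	+----------------------+ <- icmd
	 *	| struct fcp_ipkt      |   (embeds ipkt_fc_packet)
	 *	+----------------------+ <- fpkt->pkt_data_cookie
	 *	| DMA cookie array     |   (port_dmacookie_sz bytes)
	 *	+----------------------+ <- fpkt->pkt_fca_private
	 *	| FCA private area     |   (port_priv_pkt_len bytes)
	 *	+----------------------+
	 */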
5663 
5664 	if (ptgt != NULL) {
5665 		icmd->ipkt_tgt = ptgt;
5666 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5667 	}
5668 
5669 	fpkt->pkt_comp = fcp_icmd_callback;
5670 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5671 	fpkt->pkt_cmdlen = cmd_len;
5672 	fpkt->pkt_rsplen = resp_len;
5673 	fpkt->pkt_datalen = data_len;
5674 
5675 	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count, as known to fcp, down to the transport.  If a valid
	 * count was passed into this function, we allocate memory to actually
	 * pass down this info.
5680 	 *
5681 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5682 	 * basically mean that fcp will not be able to help transport
5683 	 * distinguish if a new RSCN has come after fcp was last informed about
5684 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5685 	 * 5068068 where the device might end up going offline in case of RSCN
5686 	 * storms.
5687 	 */
5688 	fpkt->pkt_ulp_rscn_infop = NULL;
5689 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5690 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5691 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5692 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5693 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5694 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5695 			    "Failed to alloc memory to pass rscn info");
5696 		}
5697 	}
5698 
5699 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5700 		fc_ulp_rscn_info_t	*rscnp;
5701 
5702 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5703 		rscnp->ulp_rscn_count = rscn_count;
5704 	}
5705 
5706 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5707 		goto fail;
5708 	}
5709 	dma_setup++;
5710 
5711 	/*
5712 	 * Must hold target mutex across setting of pkt_pd and call to
5713 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5714 	 * away while we're not looking.
5715 	 */
5716 	if (ptgt != NULL) {
5717 		mutex_enter(&ptgt->tgt_mutex);
5718 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5719 
5720 		/* ask transport to do its initialization on this pkt */
5721 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5722 		    != FC_SUCCESS) {
5723 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5724 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5725 			    "fc_ulp_init_packet failed");
5726 			mutex_exit(&ptgt->tgt_mutex);
5727 			goto fail;
5728 		}
5729 		mutex_exit(&ptgt->tgt_mutex);
5730 	} else {
5731 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5732 		    != FC_SUCCESS) {
5733 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5734 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5735 			    "fc_ulp_init_packet failed");
5736 			goto fail;
5737 		}
5738 	}
5739 
5740 	mutex_enter(&pptr->port_mutex);
5741 	if (pptr->port_state & (FCP_STATE_DETACHING |
5742 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5743 		int rval;
5744 
5745 		mutex_exit(&pptr->port_mutex);
5746 
5747 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5748 		ASSERT(rval == FC_SUCCESS);
5749 
5750 		goto fail;
5751 	}
5752 
5753 	if (ptgt != NULL) {
5754 		mutex_enter(&ptgt->tgt_mutex);
5755 		ptgt->tgt_ipkt_cnt++;
5756 		mutex_exit(&ptgt->tgt_mutex);
5757 	}
5758 
5759 	pptr->port_ipkt_cnt++;
5760 
5761 	mutex_exit(&pptr->port_mutex);
5762 
5763 	return (icmd);
5764 
5765 fail:
5766 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5767 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5768 		    sizeof (fc_ulp_rscn_info_t));
5769 		fpkt->pkt_ulp_rscn_infop = NULL;
5770 	}
5771 
5772 	if (dma_setup) {
5773 		fcp_free_dma(pptr, icmd);
5774 	}
5775 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5776 	    (size_t)pptr->port_dmacookie_sz);
5777 
5778 	return (NULL);
5779 }
5780 
5781 /*
5782  *     Function: fcp_icmd_free
5783  *
5784  *  Description: Frees the internal command passed by the caller.
5785  *
5786  *     Argument: *pptr		Fcp port.
5787  *		 *icmd		Internal packet to free.
5788  *
5789  * Return Value: None
5790  */
5791 static void
5792 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5793 {
5794 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5795 
5796 	/* Let the underlying layers do their cleanup. */
5797 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5798 	    icmd->ipkt_fpkt);
5799 
5800 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5801 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5802 		    sizeof (fc_ulp_rscn_info_t));
5803 	}
5804 
5805 	fcp_free_dma(pptr, icmd);
5806 
5807 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5808 	    (size_t)pptr->port_dmacookie_sz);
5809 
5810 	mutex_enter(&pptr->port_mutex);
5811 
5812 	if (ptgt) {
5813 		mutex_enter(&ptgt->tgt_mutex);
5814 		ptgt->tgt_ipkt_cnt--;
5815 		mutex_exit(&ptgt->tgt_mutex);
5816 	}
5817 
5818 	pptr->port_ipkt_cnt--;
5819 	mutex_exit(&pptr->port_mutex);
5820 }
5821 
5822 /*
5823  *     Function: fcp_alloc_dma
5824  *
5825  *  Description: Allocated the DMA resources required for the internal
5826  *		 packet.
5827  *
5828  *     Argument: *pptr	FCP port.
5829  *		 *icmd	Internal FCP packet.
5830  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5831  *		 flags	Allocation flags (Sleep or NoSleep).
5832  *
5833  * Return Value: FC_SUCCESS
5834  *		 FC_NOMEM
5835  */
5836 static int
5837 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5838     int nodma, int flags)
5839 {
5840 	int		rval;
5841 	size_t		real_size;
5842 	uint_t		ccount;
5843 	int		bound = 0;
5844 	int		cmd_resp = 0;
5845 	fc_packet_t	*fpkt;
5846 	ddi_dma_cookie_t	pkt_data_cookie;
5847 	ddi_dma_cookie_t	*cp;
5848 	uint32_t		cnt;
5849 
5850 	fpkt = &icmd->ipkt_fc_packet;
5851 
5852 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5853 	    fpkt->pkt_resp_dma == NULL);
5854 
5855 	icmd->ipkt_nodma = nodma;
5856 
5857 	if (nodma) {
5858 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5859 		if (fpkt->pkt_cmd == NULL) {
5860 			goto fail;
5861 		}
5862 
5863 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5864 		if (fpkt->pkt_resp == NULL) {
5865 			goto fail;
5866 		}
5867 	} else {
5868 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5869 
5870 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5871 		if (rval == FC_FAILURE) {
5872 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5873 			    fpkt->pkt_resp_dma == NULL);
5874 			goto fail;
5875 		}
5876 		cmd_resp++;
5877 	}
5878 
5879 	if (fpkt->pkt_datalen != 0) {
5880 		/*
5881 		 * set up DMA handle and memory for the data in this packet
5882 		 */
5883 		if (ddi_dma_alloc_handle(pptr->port_dip,
5884 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
5885 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
5886 			goto fail;
5887 		}
5888 
5889 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
5890 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
5891 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
5892 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
5893 			goto fail;
5894 		}
5895 
		/* did we get less DMA memory than we asked for and need? */
5897 		if (real_size < fpkt->pkt_datalen) {
5898 			goto fail;
5899 		}
5900 
5901 		/* bind DMA address and handle together */
5902 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
5903 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
5904 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
5905 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
5906 			goto fail;
5907 		}
5908 		bound++;
5909 
5910 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
5911 			goto fail;
5912 		}
5913 
5914 		fpkt->pkt_data_cookie_cnt = ccount;
5915 
5916 		cp = fpkt->pkt_data_cookie;
5917 		*cp = pkt_data_cookie;
5918 		cp++;
5919 
5920 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
5921 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
5922 			    &pkt_data_cookie);
5923 			*cp = pkt_data_cookie;
5924 		}
5925 
5926 	}
5927 
5928 	return (FC_SUCCESS);
5929 
5930 fail:
5931 	if (bound) {
5932 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5933 	}
5934 
5935 	if (fpkt->pkt_data_dma) {
5936 		if (fpkt->pkt_data) {
5937 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5938 		}
5939 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5940 	}
5941 
5942 	if (nodma) {
5943 		if (fpkt->pkt_cmd) {
5944 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
5945 		}
5946 		if (fpkt->pkt_resp) {
5947 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
5948 		}
5949 	} else {
5950 		if (cmd_resp) {
5951 			fcp_free_cmd_resp(pptr, fpkt);
5952 		}
5953 	}
5954 
5955 	return (FC_NOMEM);
5956 }
5957 
5958 
5959 static void
5960 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5961 {
5962 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
5963 
5964 	if (fpkt->pkt_data_dma) {
5965 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5966 		if (fpkt->pkt_data) {
5967 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5968 		}
5969 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5970 	}
5971 
5972 	if (icmd->ipkt_nodma) {
5973 		if (fpkt->pkt_cmd) {
5974 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
5975 		}
5976 		if (fpkt->pkt_resp) {
5977 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
5978 		}
5979 	} else {
5980 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
5981 
5982 		fcp_free_cmd_resp(pptr, fpkt);
5983 	}
5984 }
5985 
5986 /*
5987  *     Function: fcp_lookup_target
5988  *
5989  *  Description: Finds a target given a WWN.
5990  *
5991  *     Argument: *pptr	FCP port.
5992  *		 *wwn	World Wide Name of the device to look for.
5993  *
5994  * Return Value: NULL		No target found
5995  *		 Not NULL	Target structure
5996  *
5997  *	Context: Interrupt context.
5998  *		 The mutex pptr->port_mutex must be owned.
5999  */
6000 /* ARGSUSED */
6001 static struct fcp_tgt *
6002 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6003 {
6004 	int			hash;
6005 	struct fcp_tgt	*ptgt;
6006 
6007 	ASSERT(mutex_owned(&pptr->port_mutex));
6008 
6009 	hash = FCP_HASH(wwn);
6010 
6011 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6012 	    ptgt = ptgt->tgt_next) {
6013 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6014 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6015 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6016 			break;
6017 		}
6018 	}
6019 
6020 	return (ptgt);
6021 }
6022 
6023 
6024 /*
6025  * Find target structure given a port identifier
6026  */
6027 static struct fcp_tgt *
6028 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6029 {
6030 	fc_portid_t		port_id;
6031 	la_wwn_t		pwwn;
6032 	struct fcp_tgt	*ptgt = NULL;
6033 
6034 	port_id.priv_lilp_posit = 0;
6035 	port_id.port_id = d_id;
6036 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6037 	    &pwwn) == FC_SUCCESS) {
6038 		mutex_enter(&pptr->port_mutex);
6039 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6040 		mutex_exit(&pptr->port_mutex);
6041 	}
6042 
6043 	return (ptgt);
6044 }
6045 
6046 
6047 /*
6048  * the packet completion callback routine for info cmd pkts
6049  *
 * this means fpkt points to a response to either a PLOGI or a PRLI
6051  *
6052  * if there is an error an attempt is made to call a routine to resend
6053  * the command that failed
6054  */
6055 static void
6056 fcp_icmd_callback(fc_packet_t *fpkt)
6057 {
6058 	struct fcp_ipkt	*icmd;
6059 	struct fcp_port	*pptr;
6060 	struct fcp_tgt	*ptgt;
6061 	struct la_els_prli	*prli;
6062 	struct la_els_prli	prli_s;
6063 	struct fcp_prli		*fprli;
6064 	struct fcp_lun	*plun;
6065 	int		free_pkt = 1;
6066 	int		rval;
6067 	ls_code_t	resp;
6068 	uchar_t		prli_acc = 0;
6069 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6070 	int		lun0_newalloc;
6071 
6072 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6073 
6074 	/* get ptrs to the port and target structs for the cmd */
6075 	pptr = icmd->ipkt_port;
6076 	ptgt = icmd->ipkt_tgt;
6077 
6078 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6079 
6080 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6081 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6082 		    sizeof (prli_s));
6083 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6084 	}
6085 
6086 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6087 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6088 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6089 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6090 	    ptgt->tgt_d_id);
6091 
6092 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6093 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6094 
6095 		mutex_enter(&ptgt->tgt_mutex);
6096 		if (ptgt->tgt_pd_handle == NULL) {
6097 			/*
6098 			 * in a fabric environment the port device handles
6099 			 * get created only after successful LOGIN into the
6100 			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet;
			 * save it now
6103 			 */
6104 			ASSERT(fpkt->pkt_pd != NULL);
6105 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6106 		}
6107 		mutex_exit(&ptgt->tgt_mutex);
6108 
6109 		/* which ELS cmd is this response for ?? */
6110 		switch (icmd->ipkt_opcode) {
6111 		case LA_ELS_PLOGI:
6112 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6113 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6114 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6115 			    ptgt->tgt_d_id,
6116 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6117 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6118 
6119 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6120 			    FCP_TGT_TRACE_15);
6121 
6122 			/* Note that we are not allocating a new icmd */
6123 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6124 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6125 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6126 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6127 				    FCP_TGT_TRACE_16);
6128 				goto fail;
6129 			}
6130 			break;
6131 
6132 		case LA_ELS_PRLI:
6133 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6134 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6135 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6136 
6137 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6138 			    FCP_TGT_TRACE_17);
6139 
6140 			prli = &prli_s;
6141 
6142 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6143 			    sizeof (prli_s));
6144 
6145 			fprli = (struct fcp_prli *)prli->service_params;
6146 
6147 			mutex_enter(&ptgt->tgt_mutex);
6148 			ptgt->tgt_icap = fprli->initiator_fn;
6149 			ptgt->tgt_tcap = fprli->target_fn;
6150 			mutex_exit(&ptgt->tgt_mutex);
6151 
6152 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6153 				/*
6154 				 * this FCP device does not support target mode
6155 				 */
6156 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6157 				    FCP_TGT_TRACE_18);
6158 				goto fail;
6159 			}
6160 			if (fprli->retry == 1) {
6161 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6162 				    &ptgt->tgt_port_wwn);
6163 			}
6164 
6165 			/* target is no longer offline */
6166 			mutex_enter(&pptr->port_mutex);
6167 			mutex_enter(&ptgt->tgt_mutex);
6168 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6169 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6170 				    FCP_TGT_MARK);
6171 			} else {
6172 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change"
				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6176 				mutex_exit(&ptgt->tgt_mutex);
6177 				mutex_exit(&pptr->port_mutex);
6178 				goto fail;
6179 			}
6180 			mutex_exit(&ptgt->tgt_mutex);
6181 			mutex_exit(&pptr->port_mutex);
6182 
6183 			/*
6184 			 * lun 0 should always respond to inquiry, so
6185 			 * get the LUN struct for LUN 0
6186 			 *
6187 			 * Currently we deal with first level of addressing.
			 * If / when we start supporting DTYPE_ARRAY_CTRL
			 * (i.e. array controller) device types
6190 			 * this logic will need revisiting.
6191 			 */
6192 			lun0_newalloc = 0;
6193 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6194 				/*
6195 				 * no LUN struct for LUN 0 yet exists,
6196 				 * so create one
6197 				 */
6198 				plun = fcp_alloc_lun(ptgt);
6199 				if (plun == NULL) {
6200 					fcp_log(CE_WARN, pptr->port_dip,
6201 					    "!Failed to allocate lun 0 for"
6202 					    " D_ID=%x", ptgt->tgt_d_id);
6203 					goto fail;
6204 				}
6205 				lun0_newalloc = 1;
6206 			}
6207 
6208 			/* fill in LUN info */
6209 			mutex_enter(&ptgt->tgt_mutex);
6210 			/*
6211 			 * consider lun 0 as device not connected if it is
6212 			 * offlined or newly allocated
6213 			 */
6214 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6215 			    lun0_newalloc) {
6216 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6217 			}
6218 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6219 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6220 			ptgt->tgt_lun_cnt = 1;
6221 			ptgt->tgt_report_lun_cnt = 0;
6222 			mutex_exit(&ptgt->tgt_mutex);
6223 
6224 			/* Retrieve the rscn count (if a valid one exists) */
6225 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6226 				rscn_count = ((fc_ulp_rscn_info_t *)
6227 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6228 				    ->ulp_rscn_count;
6229 			} else {
6230 				rscn_count = FC_INVALID_RSCN_COUNT;
6231 			}
6232 
6233 			/* send Report Lun request to target */
6234 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6235 			    sizeof (struct fcp_reportlun_resp),
6236 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6237 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6238 				mutex_enter(&pptr->port_mutex);
6239 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6240 					fcp_log(CE_WARN, pptr->port_dip,
6241 					    "!Failed to send REPORT LUN to"
6242 					    "  D_ID=%x", ptgt->tgt_d_id);
6243 				} else {
6244 					FCP_TRACE(fcp_logq,
6245 					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2: state change"
					    " occurred for D_ID=0x%x",
					    ptgt->tgt_d_id);
6250 				}
6251 				mutex_exit(&pptr->port_mutex);
6252 
6253 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6254 				    FCP_TGT_TRACE_19);
6255 
6256 				goto fail;
6257 			} else {
6258 				free_pkt = 0;
6259 				fcp_icmd_free(pptr, icmd);
6260 			}
6261 			break;
6262 
6263 		default:
6264 			fcp_log(CE_WARN, pptr->port_dip,
6265 			    "!fcp_icmd_callback Invalid opcode");
6266 			goto fail;
6267 		}
6268 
6269 		return;
6270 	}
6271 
6272 
6273 	/*
6274 	 * Other PLOGI failures are not retried as the
6275 	 * transport does it already
6276 	 */
6277 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6278 		if (fcp_is_retryable(icmd) &&
6279 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6280 
6281 			if (FCP_MUST_RETRY(fpkt)) {
6282 				fcp_queue_ipkt(pptr, fpkt);
6283 				return;
6284 			}
6285 
6286 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6287 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6288 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6289 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6290 			    fpkt->pkt_reason);
6291 
6292 			/*
6293 			 * Retry by recalling the routine that
6294 			 * originally queued this packet
6295 			 */
6296 			mutex_enter(&pptr->port_mutex);
6297 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6298 				caddr_t msg;
6299 
6300 				mutex_exit(&pptr->port_mutex);
6301 
6302 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6303 
6304 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6305 					fpkt->pkt_timeout +=
6306 					    FCP_TIMEOUT_DELTA;
6307 				}
6308 
6309 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6310 				    fpkt);
6311 				if (rval == FC_SUCCESS) {
6312 					return;
6313 				}
6314 
6315 				if (rval == FC_STATEC_BUSY ||
6316 				    rval == FC_OFFLINE) {
6317 					fcp_queue_ipkt(pptr, fpkt);
6318 					return;
6319 				}
6320 				(void) fc_ulp_error(rval, &msg);
6321 
6322 				fcp_log(CE_NOTE, pptr->port_dip,
6323 				    "!ELS 0x%x failed to d_id=0x%x;"
6324 				    " %s", icmd->ipkt_opcode,
6325 				    ptgt->tgt_d_id, msg);
6326 			} else {
6327 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change"
				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6331 				mutex_exit(&pptr->port_mutex);
6332 			}
6333 		}
6334 	} else {
6335 		if (fcp_is_retryable(icmd) &&
6336 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6337 			if (FCP_MUST_RETRY(fpkt)) {
6338 				fcp_queue_ipkt(pptr, fpkt);
6339 				return;
6340 			}
6341 		}
6342 		mutex_enter(&pptr->port_mutex);
6343 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6344 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6345 			mutex_exit(&pptr->port_mutex);
6346 			fcp_print_error(fpkt);
6347 		} else {
6348 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occurred"
6351 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6352 			mutex_exit(&pptr->port_mutex);
6353 		}
6354 	}
6355 
6356 fail:
6357 	if (free_pkt) {
6358 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6359 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6360 		fcp_icmd_free(pptr, icmd);
6361 	}
6362 }
6363 
6364 
6365 /*
6366  * called internally to send an info cmd using the transport
6367  *
6368  * sends either an INQ or a REPORT_LUN
6369  *
6370  * when the packet is completed fcp_scsi_callback is called
6371  */
6372 static int
6373 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6374     int lcount, int tcount, int cause, uint32_t rscn_count)
6375 {
6376 	int			nodma;
6377 	struct fcp_ipkt		*icmd;
6378 	struct fcp_tgt		*ptgt;
6379 	struct fcp_port		*pptr;
6380 	fc_frame_hdr_t		*hp;
6381 	fc_packet_t		*fpkt;
6382 	struct fcp_cmd		fcp_cmd;
6383 	struct fcp_cmd		*fcmd;
6384 	union scsi_cdb		*scsi_cdb;
6385 
6386 	ASSERT(plun != NULL);
6387 
6388 	ptgt = plun->lun_tgt;
6389 	ASSERT(ptgt != NULL);
6390 
6391 	pptr = ptgt->tgt_port;
6392 	ASSERT(pptr != NULL);
6393 
6394 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6395 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6396 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6397 
6398 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6399 
6400 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6401 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6402 	    rscn_count);
6403 
6404 	if (icmd == NULL) {
6405 		return (DDI_FAILURE);
6406 	}
6407 
6408 	fpkt = icmd->ipkt_fpkt;
6409 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6410 	icmd->ipkt_retries = 0;
6411 	icmd->ipkt_opcode = opcode;
6412 	icmd->ipkt_lun = plun;
6413 
6414 	if (nodma) {
6415 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6416 	} else {
6417 		fcmd = &fcp_cmd;
6418 	}
6419 	bzero(fcmd, sizeof (struct fcp_cmd));
6420 
6421 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6422 
6423 	hp = &fpkt->pkt_cmd_fhdr;
6424 
6425 	hp->s_id = pptr->port_id;
6426 	hp->d_id = ptgt->tgt_d_id;
6427 	hp->r_ctl = R_CTL_COMMAND;
6428 	hp->type = FC_TYPE_SCSI_FCP;
6429 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6430 	hp->rsvd = 0;
6431 	hp->seq_id = 0;
6432 	hp->seq_cnt = 0;
6433 	hp->ox_id = 0xffff;
6434 	hp->rx_id = 0xffff;
6435 	hp->ro = 0;
6436 
6437 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6438 
6439 	/*
6440 	 * Request SCSI target for expedited processing
6441 	 */
6442 
6443 	/*
6444 	 * Set up for untagged queuing because we do not
6445 	 * know if the fibre device supports queuing.
6446 	 */
6447 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6448 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6449 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6450 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6451 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6452 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6453 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6454 
6455 	switch (opcode) {
6456 	case SCMD_INQUIRY_PAGE83:
6457 		/*
6458 		 * Prepare to get the Inquiry VPD page 83 information
6459 		 */
6460 		fcmd->fcp_cntl.cntl_read_data = 1;
6461 		fcmd->fcp_cntl.cntl_write_data = 0;
6462 		fcmd->fcp_data_len = alloc_len;
6463 
6464 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6465 		fpkt->pkt_comp = fcp_scsi_callback;
6466 
6467 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6468 		scsi_cdb->g0_addr2 = 0x01;
6469 		scsi_cdb->g0_addr1 = 0x83;
6470 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6471 		break;
6472 
6473 	case SCMD_INQUIRY:
6474 		fcmd->fcp_cntl.cntl_read_data = 1;
6475 		fcmd->fcp_cntl.cntl_write_data = 0;
6476 		fcmd->fcp_data_len = alloc_len;
6477 
6478 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6479 		fpkt->pkt_comp = fcp_scsi_callback;
6480 
6481 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6482 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6483 		break;
6484 
6485 	case SCMD_REPORT_LUN: {
6486 		fc_portid_t	d_id;
6487 		opaque_t	fca_dev;
6488 
6489 		ASSERT(alloc_len >= 16);
6490 
6491 		d_id.priv_lilp_posit = 0;
6492 		d_id.port_id = ptgt->tgt_d_id;
6493 
6494 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6495 
6496 		mutex_enter(&ptgt->tgt_mutex);
6497 		ptgt->tgt_fca_dev = fca_dev;
6498 		mutex_exit(&ptgt->tgt_mutex);
6499 
6500 		fcmd->fcp_cntl.cntl_read_data = 1;
6501 		fcmd->fcp_cntl.cntl_write_data = 0;
6502 		fcmd->fcp_data_len = alloc_len;
6503 
6504 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6505 		fpkt->pkt_comp = fcp_scsi_callback;
6506 
6507 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6508 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6509 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6510 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6511 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6512 		break;
6513 	}
6514 
6515 	default:
6516 		fcp_log(CE_WARN, pptr->port_dip,
6517 		    "!fcp_send_scsi Invalid opcode");
6518 		break;
6519 	}
6520 
6521 	if (!nodma) {
6522 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6523 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6524 	}
6525 
6526 	mutex_enter(&pptr->port_mutex);
6527 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6528 
6529 		mutex_exit(&pptr->port_mutex);
6530 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6531 		    FC_SUCCESS) {
6532 			fcp_icmd_free(pptr, icmd);
6533 			return (DDI_FAILURE);
6534 		}
6535 		return (DDI_SUCCESS);
6536 	} else {
6537 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_send_scsi,1: state change occurred"
6540 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6541 		mutex_exit(&pptr->port_mutex);
6542 		fcp_icmd_free(pptr, icmd);
6543 		return (DDI_FAILURE);
6544 	}
6545 }
6546 
6547 
6548 /*
 * called by fcp_scsi_callback to handle the cases where REPORT_LUN
 * returns a RESERVATION CONFLICT, ILLEGAL REQUEST or a UNIT ATTENTION
6551  */
6552 static int
6553 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6554 {
6555 	uchar_t				rqlen;
6556 	int				rval = DDI_FAILURE;
6557 	struct scsi_extended_sense	sense_info, *sense;
6558 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6559 	    fpkt->pkt_ulp_private;
6560 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6561 	struct fcp_port		*pptr = ptgt->tgt_port;
6562 
6563 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6564 
6565 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6566 		/*
6567 		 * SCSI-II Reserve Release support. Some older FC drives return
6568 		 * Reservation conflict for Report Luns command.
6569 		 */
6570 		if (icmd->ipkt_nodma) {
6571 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6572 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6573 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6574 		} else {
6575 			fcp_rsp_t	new_resp;
6576 
6577 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6578 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6579 
6580 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6581 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6582 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6583 
6584 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6585 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6586 		}
6587 
6588 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6589 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6590 
6591 		return (DDI_SUCCESS);
6592 	}
6593 
6594 	sense = &sense_info;
6595 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6596 		/* no need to continue if sense length is not set */
6597 		return (rval);
6598 	}
6599 
6600 	/* casting 64-bit integer to 8-bit */
6601 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6602 	    sizeof (struct scsi_extended_sense));
6603 
6604 	if (rqlen < 14) {
		/* sense data too short to contain the additional sense code */
6606 		return (rval);
6607 	}
6608 
6609 	if (icmd->ipkt_nodma) {
6610 		/*
6611 		 * We can safely use fcp_response_len here since the
6612 		 * only path that calls fcp_check_reportlun,
6613 		 * fcp_scsi_callback, has already called
6614 		 * fcp_validate_fcp_response.
6615 		 */
6616 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6617 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6618 	} else {
6619 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6620 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6621 		    sizeof (struct scsi_extended_sense));
6622 	}
6623 
6624 	if (!FCP_SENSE_NO_LUN(sense)) {
6625 		mutex_enter(&ptgt->tgt_mutex);
6626 		/* clear the flag if any */
6627 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6628 		mutex_exit(&ptgt->tgt_mutex);
6629 	}
6630 
6631 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6632 	    (sense->es_add_code == 0x20)) {
6633 		if (icmd->ipkt_nodma) {
6634 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6635 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6636 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6637 		} else {
6638 			fcp_rsp_t	new_resp;
6639 
6640 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6641 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6642 
6643 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6644 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6645 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6646 
6647 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6648 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6649 		}
6650 
6651 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6652 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6653 
6654 		return (DDI_SUCCESS);
6655 	}
6656 
6657 	/*
	 * This is for the STK library, which returns a check condition
	 * (device not ready, manual assistance needed) to a REPORT LUN
	 * command when the door is open.
6661 	 */
6662 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6663 		if (icmd->ipkt_nodma) {
6664 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6665 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6666 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6667 		} else {
6668 			fcp_rsp_t	new_resp;
6669 
6670 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6671 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6672 
6673 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6674 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6675 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6676 
6677 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6678 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6679 		}
6680 
6681 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6682 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6683 
6684 		return (DDI_SUCCESS);
6685 	}
6686 
6687 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6688 	    (FCP_SENSE_NO_LUN(sense))) {
6689 		mutex_enter(&ptgt->tgt_mutex);
6690 		if ((FCP_SENSE_NO_LUN(sense)) &&
6691 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6692 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6693 			mutex_exit(&ptgt->tgt_mutex);
6694 			/*
			 * reconfig was triggered by ILLEGAL REQUEST but
6696 			 * got ILLEGAL REQUEST again
6697 			 */
6698 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6699 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6700 			    "!FCP: Unable to obtain Report Lun data"
6701 			    " target=%x", ptgt->tgt_d_id);
6702 		} else {
6703 			if (ptgt->tgt_tid == NULL) {
6704 				timeout_id_t	tid;
6705 				/*
6706 				 * REPORT LUN data has changed.	 Kick off
6707 				 * rediscovery
6708 				 */
6709 				tid = timeout(fcp_reconfigure_luns,
6710 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6711 
6712 				ptgt->tgt_tid = tid;
6713 				ptgt->tgt_state |= FCP_TGT_BUSY;
6714 			}
6715 			if (FCP_SENSE_NO_LUN(sense)) {
6716 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6717 			}
6718 			mutex_exit(&ptgt->tgt_mutex);
6719 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6720 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6721 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6722 				    "!FCP:Report Lun Has Changed"
6723 				    " target=%x", ptgt->tgt_d_id);
6724 			} else if (FCP_SENSE_NO_LUN(sense)) {
6725 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6726 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6727 				    "!FCP:LU Not Supported"
6728 				    " target=%x", ptgt->tgt_d_id);
6729 			}
6730 		}
6731 		rval = DDI_SUCCESS;
6732 	}
6733 
6734 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6735 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6736 	    "D_ID=%x, sense=%x, status=%x",
6737 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6738 	    rsp->fcp_u.fcp_status.scsi_status);
6739 
6740 	return (rval);
6741 }
6742 
6743 /*
6744  *     Function: fcp_scsi_callback
6745  *
6746  *  Description: This is the callback routine set by fcp_send_scsi() after
6747  *		 it calls fcp_icmd_alloc().  The SCSI command completed here
6748  *		 and autogenerated by FCP are:	REPORT_LUN, INQUIRY and
6749  *		 INQUIRY_PAGE83.
6750  *
6751  *     Argument: *fpkt	 FC packet used to convey the command
6752  *
6753  * Return Value: None
6754  */
6755 static void
6756 fcp_scsi_callback(fc_packet_t *fpkt)
6757 {
6758 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6759 	    fpkt->pkt_ulp_private;
6760 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6761 	struct fcp_port	*pptr;
6762 	struct fcp_tgt	*ptgt;
6763 	struct fcp_lun	*plun;
6764 	struct fcp_rsp		response, *rsp;
6765 
6766 	if (icmd->ipkt_nodma) {
6767 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6768 	} else {
6769 		rsp = &response;
6770 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6771 		    sizeof (struct fcp_rsp));
6772 	}
6773 
6774 	ptgt = icmd->ipkt_tgt;
6775 	pptr = ptgt->tgt_port;
6776 	plun = icmd->ipkt_lun;
6777 
6778 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6779 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6780 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6781 	    "status=%x, lun num=%x",
6782 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6783 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6784 
6785 	/*
	 * Pre-init the LUN GUID with the NWWN if the device does not
	 * support multiple LUNs and we know it is not page83
	 * compliant.  Although a NWWN is not LUN-unique, we will be
	 * fine since there is only one LUN behind the target in this
	 * case.
6791 	 */
6792 	if ((plun->lun_guid_size == 0) &&
6793 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6794 	    (fcp_symmetric_device_probe(plun) == 0)) {
6795 
6796 		char ascii_wwn[FC_WWN_SIZE*2+1];
6797 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6798 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6799 	}
6800 
6801 	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return an overrun
	 * when they have more data than what is asked for in the CDB.  A real
	 * overrun occurs when FCP_DL is smaller than the data length in the
	 * CDB.  Here we know that the REPORT LUN command this driver formed
	 * has a correct FCP_DL, so this OVERRUN is due to bad device behavior.
	 * In reality this is FC_SUCCESS.
6808 	 */
6809 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6810 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6811 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6812 		fpkt->pkt_state = FC_PKT_SUCCESS;
6813 	}
6814 
6815 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6816 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6817 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6818 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6819 		    ptgt->tgt_d_id);
6820 
6821 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6822 			/*
6823 			 * Inquiry VPD page command on A5K SES devices would
6824 			 * result in data CRC errors.
6825 			 */
6826 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6827 				(void) fcp_handle_page83(fpkt, icmd, 1);
6828 				return;
6829 			}
6830 		}
6831 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6832 		    FCP_MUST_RETRY(fpkt)) {
6833 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6834 			fcp_retry_scsi_cmd(fpkt);
6835 			return;
6836 		}
6837 
6838 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6839 		    FCP_TGT_TRACE_20);
6840 
6841 		mutex_enter(&pptr->port_mutex);
6842 		mutex_enter(&ptgt->tgt_mutex);
6843 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6844 			mutex_exit(&ptgt->tgt_mutex);
6845 			mutex_exit(&pptr->port_mutex);
6846 			fcp_print_error(fpkt);
6847 		} else {
6848 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occurred"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
6852 			mutex_exit(&ptgt->tgt_mutex);
6853 			mutex_exit(&pptr->port_mutex);
6854 		}
6855 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6856 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6857 		fcp_icmd_free(pptr, icmd);
6858 		return;
6859 	}
6860 
6861 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
6862 
6863 	mutex_enter(&pptr->port_mutex);
6864 	mutex_enter(&ptgt->tgt_mutex);
6865 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6866 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occurred"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
6870 		mutex_exit(&ptgt->tgt_mutex);
6871 		mutex_exit(&pptr->port_mutex);
6872 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6873 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6874 		fcp_icmd_free(pptr, icmd);
6875 		return;
6876 	}
6877 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
6878 
6879 	mutex_exit(&ptgt->tgt_mutex);
6880 	mutex_exit(&pptr->port_mutex);
6881 
6882 	if (icmd->ipkt_nodma) {
6883 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
6884 		    sizeof (struct fcp_rsp));
6885 	} else {
6886 		bep = &fcp_rsp_err;
6887 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
6888 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
6889 	}
6890 
6891 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
6892 		fcp_retry_scsi_cmd(fpkt);
6893 		return;
6894 	}
6895 
6896 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
6897 	    FCP_NO_FAILURE) {
6898 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6899 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6900 		    "rsp_code=0x%x, rsp_len_set=0x%x",
6901 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
6902 		fcp_retry_scsi_cmd(fpkt);
6903 		return;
6904 	}
6905 
6906 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
6907 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
6908 		fcp_queue_ipkt(pptr, fpkt);
6909 		return;
6910 	}
6911 
6912 	/*
	 * Devices that do not support INQUIRY_PAGE83 return a check condition
	 * with ILLEGAL REQUEST, as per the SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * Ideally we want to enumerate these devices as non-mpxio devices.
6917 	 * SES nodes (Daktari only currently) are an exception to this.
6918 	 */
6919 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6920 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
6921 
6922 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6923 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
6924 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
6925 		    "check condition. May enumerate as non-mpxio device",
6926 		    ptgt->tgt_d_id, plun->lun_type);
6927 
6928 		/*
6929 		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepancy in that the other
6931 		 * internal FC disks will get enumerated as mpxio devices.
6932 		 * Applications like luxadm expect this to be consistent.
6933 		 *
6934 		 * So, we put in a hack here to check if this is an SES device
6935 		 * and handle it here.
6936 		 */
6937 		if (plun->lun_type == DTYPE_ESI) {
6938 			/*
			 * Since pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID, which is not there
			 * anyway.
6943 			 */
6944 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
6945 			(void) fcp_handle_page83(fpkt, icmd, 1);
6946 			return;
6947 		}
6948 
6949 		mutex_enter(&ptgt->tgt_mutex);
6950 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
6951 		    FCP_LUN_MARK | FCP_LUN_BUSY);
6952 		mutex_exit(&ptgt->tgt_mutex);
6953 
6954 		(void) fcp_call_finish_init(pptr, ptgt,
6955 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6956 		    icmd->ipkt_cause);
6957 		fcp_icmd_free(pptr, icmd);
6958 		return;
6959 	}
6960 
6961 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6962 		int rval = DDI_FAILURE;
6963 
6964 		/*
		 * handle cases where REPORT LUN isn't supported or
		 * returns a UNIT ATTENTION by faking up our own
		 * REPORT_LUN response
6968 		 */
6969 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
6970 			rval = fcp_check_reportlun(rsp, fpkt);
6971 
6972 			/*
6973 			 * fcp_check_reportlun might have modified the
6974 			 * FCP response. Copy it in again to get an updated
6975 			 * FCP response
6976 			 */
6977 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
6978 				rsp = &response;
6979 
6980 				FCP_CP_IN(fpkt->pkt_resp, rsp,
6981 				    fpkt->pkt_resp_acc,
6982 				    sizeof (struct fcp_rsp));
6983 			}
6984 		}
6985 
6986 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6987 			if (rval == DDI_SUCCESS) {
6988 				(void) fcp_call_finish_init(pptr, ptgt,
6989 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6990 				    icmd->ipkt_cause);
6991 				fcp_icmd_free(pptr, icmd);
6992 			} else {
6993 				fcp_retry_scsi_cmd(fpkt);
6994 			}
6995 
6996 			return;
6997 		}
6998 	} else {
6999 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7000 			mutex_enter(&ptgt->tgt_mutex);
7001 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7002 			mutex_exit(&ptgt->tgt_mutex);
7003 		}
7004 	}
7005 
7006 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7007 
7008 	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);
7009 
7010 	switch (icmd->ipkt_opcode) {
7011 	case SCMD_INQUIRY:
7012 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7013 		fcp_handle_inquiry(fpkt, icmd);
7014 		break;
7015 
7016 	case SCMD_REPORT_LUN:
7017 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7018 		    FCP_TGT_TRACE_22);
7019 		fcp_handle_reportlun(fpkt, icmd);
7020 		break;
7021 
7022 	case SCMD_INQUIRY_PAGE83:
7023 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7024 		(void) fcp_handle_page83(fpkt, icmd, 0);
7025 		break;
7026 
7027 	default:
7028 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7029 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7030 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7031 		fcp_icmd_free(pptr, icmd);
7032 		break;
7033 	}
7034 }
7035 
7036 
7037 static void
7038 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7039 {
7040 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7041 	    fpkt->pkt_ulp_private;
7042 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7043 	struct fcp_port	*pptr = ptgt->tgt_port;
7044 
7045 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7046 	    fcp_is_retryable(icmd)) {
7047 		mutex_enter(&pptr->port_mutex);
7048 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7049 			mutex_exit(&pptr->port_mutex);
7050 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7051 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7052 			    "Retrying %s to %x; state=%x, reason=%x",
7053 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7054 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7055 			    fpkt->pkt_state, fpkt->pkt_reason);
7056 
7057 			fcp_queue_ipkt(pptr, fpkt);
7058 		} else {
7059 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_retry_scsi_cmd,1: state change occurred"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
7063 			mutex_exit(&pptr->port_mutex);
7064 			(void) fcp_call_finish_init(pptr, ptgt,
7065 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7066 			    icmd->ipkt_cause);
7067 			fcp_icmd_free(pptr, icmd);
7068 		}
7069 	} else {
7070 		fcp_print_error(fpkt);
7071 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7072 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7073 		fcp_icmd_free(pptr, icmd);
7074 	}
7075 }
7076 
7077 /*
7078  *     Function: fcp_handle_page83
7079  *
7080  *  Description: Treats the response to INQUIRY_PAGE83.
7081  *
7082  *     Argument: *fpkt	FC packet used to convey the command.
7083  *		 *icmd	Original fcp_ipkt structure.
7084  *		 ignore_page83_data
7085  *			if it's 1, that means it's a special devices's
7086  *			page83 response, it should be enumerated under mpxio
7087  *
7088  * Return Value: None
7089  */
7090 static void
7091 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7092     int ignore_page83_data)
7093 {
7094 	struct fcp_port	*pptr;
7095 	struct fcp_lun	*plun;
7096 	struct fcp_tgt	*ptgt;
7097 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7098 	int			fail = 0;
7099 	ddi_devid_t		devid;
7100 	char			*guid = NULL;
7101 	int			ret;
7102 
7103 	ASSERT(icmd != NULL && fpkt != NULL);
7104 
7105 	pptr = icmd->ipkt_port;
7106 	ptgt = icmd->ipkt_tgt;
7107 	plun = icmd->ipkt_lun;
7108 
7109 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7110 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7111 
7112 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7113 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7114 
7115 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7116 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7117 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7118 		    "dtype=0x%x, lun num=%x",
7119 		    pptr->port_instance, ptgt->tgt_d_id,
7120 		    dev_id_page[0], plun->lun_num);
7121 
7122 		ret = ddi_devid_scsi_encode(
7123 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7124 		    NULL,		/* driver name */
7125 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7126 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7127 		    NULL,		/* page 80 data */
7128 		    0,		/* page 80 len */
7129 		    dev_id_page,	/* page 83 data */
7130 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7131 		    &devid);
7132 
7133 		if (ret == DDI_SUCCESS) {
7134 
7135 			guid = ddi_devid_to_guid(devid);
7136 
7137 			if (guid) {
7138 				/*
7139 				 * Check our current guid.  If it's non null
7140 				 * and it has changed, we need to copy it into
7141 				 * lun_old_guid since we might still need it.
7142 				 */
7143 				if (plun->lun_guid &&
7144 				    strcmp(guid, plun->lun_guid)) {
7145 					unsigned int len;
7146 
7147 					/*
7148 					 * If the guid of the LUN changes,
7149 					 * reconfiguration should be triggered
7150 					 * to reflect the changes.
7151 					 * i.e. we should offline the LUN with
7152 					 * the old guid, and online the LUN with
7153 					 * the new guid.
7154 					 */
7155 					plun->lun_state |= FCP_LUN_CHANGED;
7156 
7157 					if (plun->lun_old_guid) {
7158 						kmem_free(plun->lun_old_guid,
7159 						    plun->lun_old_guid_size);
7160 					}
7161 
7162 					len = plun->lun_guid_size;
7163 					plun->lun_old_guid_size = len;
7164 
7165 					plun->lun_old_guid = kmem_zalloc(len,
7166 					    KM_NOSLEEP);
7167 
7168 					if (plun->lun_old_guid) {
7169 						/*
7170 						 * The alloc was successful then
7171 						 * let's do the copy.
7172 						 */
7173 						bcopy(plun->lun_guid,
7174 						    plun->lun_old_guid, len);
7175 					} else {
7176 						fail = 1;
7177 						plun->lun_old_guid_size = 0;
7178 					}
7179 				}
7180 				if (!fail) {
7181 					if (fcp_copy_guid_2_lun_block(
7182 					    plun, guid)) {
7183 						fail = 1;
7184 					}
7185 				}
7186 				ddi_devid_free_guid(guid);
7187 
7188 			} else {
7189 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7190 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7191 				    "fcp_handle_page83: unable to create "
7192 				    "GUID");
7193 
7194 				/* couldn't create good guid from devid */
7195 				fail = 1;
7196 			}
7197 			ddi_devid_free(devid);
7198 
7199 		} else if (ret == DDI_NOT_WELL_FORMED) {
7200 			/* NULL filled data for page 83 */
7201 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7202 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7203 			    "fcp_handle_page83: retry GUID");
7204 
7205 			icmd->ipkt_retries = 0;
7206 			fcp_retry_scsi_cmd(fpkt);
7207 			return;
7208 		} else {
7209 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7210 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7211 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7212 			    ret);
7213 			/*
7214 			 * Since the page83 validation
7215 			 * introduced late, we are being
7216 			 * tolerant to the existing devices
7217 			 * that already found to be working
7218 			 * under mpxio, like A5200's SES device,
7219 			 * its page83 response will not be standard-compliant,
7220 			 * but we still want it to be enumerated under mpxio.
7221 			 */
7222 			if (fcp_symmetric_device_probe(plun) != 0) {
7223 				fail = 1;
7224 			}
7225 		}
7226 
7227 	} else {
7228 		/* bad packet state */
7229 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7230 
7231 		/*
7232 		 * For some special devices (A5K SES and Daktari's SES devices),
7233 		 * they should be enumerated under mpxio
7234 		 * or "luxadm dis" will fail
7235 		 */
7236 		if (ignore_page83_data) {
7237 			fail = 0;
7238 		} else {
7239 			fail = 1;
7240 		}
7241 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7242 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7243 		    "!Devid page cmd failed. "
7244 		    "fpkt_state: %x fpkt_reason: %x",
7245 		    "ignore_page83: %d",
7246 		    fpkt->pkt_state, fpkt->pkt_reason,
7247 		    ignore_page83_data);
7248 	}
7249 
7250 	mutex_enter(&pptr->port_mutex);
7251 	mutex_enter(&plun->lun_mutex);
7252 	/*
7253 	 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7254 	 * mismatch between lun_cip and lun_mpxio.
7255 	 */
7256 	if (plun->lun_cip == NULL) {
7257 		/*
7258 		 * If we don't have a guid for this lun it's because we were
7259 		 * unable to glean one from the page 83 response.  Set the
7260 		 * control flag to 0 here to make sure that we don't attempt to
7261 		 * enumerate it under mpxio.
7262 		 */
7263 		if (fail || pptr->port_mpxio == 0) {
7264 			plun->lun_mpxio = 0;
7265 		} else {
7266 			plun->lun_mpxio = 1;
7267 		}
7268 	}
7269 	mutex_exit(&plun->lun_mutex);
7270 	mutex_exit(&pptr->port_mutex);
7271 
7272 	mutex_enter(&ptgt->tgt_mutex);
7273 	plun->lun_state &=
7274 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7275 	mutex_exit(&ptgt->tgt_mutex);
7276 
7277 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7278 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7279 
7280 	fcp_icmd_free(pptr, icmd);
7281 }
7282 
7283 /*
7284  *     Function: fcp_handle_inquiry
7285  *
7286  *  Description: Called by fcp_scsi_callback to handle the response to an
7287  *		 INQUIRY request.
7288  *
7289  *     Argument: *fpkt	FC packet used to convey the command.
7290  *		 *icmd	Original fcp_ipkt structure.
7291  *
7292  * Return Value: None
7293  */
7294 static void
7295 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7296 {
7297 	struct fcp_port	*pptr;
7298 	struct fcp_lun	*plun;
7299 	struct fcp_tgt	*ptgt;
7300 	uchar_t		dtype;
7301 	uchar_t		pqual;
7302 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7303 
7304 	ASSERT(icmd != NULL && fpkt != NULL);
7305 
7306 	pptr = icmd->ipkt_port;
7307 	ptgt = icmd->ipkt_tgt;
7308 	plun = icmd->ipkt_lun;
7309 
7310 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7311 	    sizeof (struct scsi_inquiry));
7312 
7313 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7314 	pqual = plun->lun_inq.inq_dtype >> 5;
7315 
7316 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7317 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7318 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7319 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7320 	    plun->lun_num, dtype, pqual);
7321 
7322 	if (pqual != 0) {
7323 		/*
7324 		 * Non-zero peripheral qualifier
7325 		 */
7326 		fcp_log(CE_CONT, pptr->port_dip,
7327 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7328 		    "Device type=0x%x Peripheral qual=0x%x\n",
7329 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7330 
7331 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7332 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7333 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7334 		    "Device type=0x%x Peripheral qual=0x%x\n",
7335 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7336 
7337 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7338 
7339 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7340 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7341 		fcp_icmd_free(pptr, icmd);
7342 		return;
7343 	}
7344 
7345 	/*
7346 	 * If the device is already initialized, check the dtype
7347 	 * for a change. If it has changed then update the flags
7348 	 * so the create_luns will offline the old device and
7349 	 * create the new device. Refer to bug: 4764752
7350 	 */
7351 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7352 		plun->lun_state |= FCP_LUN_CHANGED;
7353 	}
7354 	plun->lun_type = plun->lun_inq.inq_dtype;
7355 
7356 	/*
7357 	 * This code is setting/initializing the throttling in the FCA
7358 	 * driver.
7359 	 */
7360 	mutex_enter(&pptr->port_mutex);
7361 	if (!pptr->port_notify) {
7362 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			/*
			 * Pack the notification: the low byte carries the
			 * FC_NOTIFY_THROTTLE command and the next byte the
			 * FCP_SVE_THROTTLE value.
			 */
			uint32_t cmd = FC_NOTIFY_THROTTLE |
			    (FCP_SVE_THROTTLE << 8);
7367 			pptr->port_notify = 1;
7368 			mutex_exit(&pptr->port_mutex);
7369 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7370 			mutex_enter(&pptr->port_mutex);
7371 		}
7372 	}
7373 
7374 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7375 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7376 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7377 		    "fcp_handle_inquiry,1:state change occured"
7378 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7379 		mutex_exit(&pptr->port_mutex);
7380 
7381 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7382 		(void) fcp_call_finish_init(pptr, ptgt,
7383 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7384 		    icmd->ipkt_cause);
7385 		fcp_icmd_free(pptr, icmd);
7386 		return;
7387 	}
7388 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7389 	mutex_exit(&pptr->port_mutex);
7390 
7391 	/* Retrieve the rscn count (if a valid one exists) */
7392 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7393 		rscn_count = ((fc_ulp_rscn_info_t *)
7394 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7395 	} else {
7396 		rscn_count = FC_INVALID_RSCN_COUNT;
7397 	}
7398 
	/*
	 * Read Inquiry VPD Page 0x83 to uniquely
	 * identify this logical unit.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	fcp_icmd_free(pptr, icmd);
7415 }
7416 
7417 /*
7418  *     Function: fcp_handle_reportlun
7419  *
7420  *  Description: Called by fcp_scsi_callback to handle the response to a
7421  *		 REPORT_LUN request.
7422  *
7423  *     Argument: *fpkt	FC packet used to convey the command.
7424  *		 *icmd	Original fcp_ipkt structure.
7425  *
7426  * Return Value: None
7427  */
7428 static void
7429 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7430 {
7431 	int				i;
7432 	int				nluns_claimed;
7433 	int				nluns_bufmax;
7434 	int				len;
7435 	uint16_t			lun_num;
7436 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7437 	struct fcp_port			*pptr;
7438 	struct fcp_tgt			*ptgt;
7439 	struct fcp_lun			*plun;
7440 	struct fcp_reportlun_resp	*report_lun;
7441 
7442 	pptr = icmd->ipkt_port;
7443 	ptgt = icmd->ipkt_tgt;
7444 	len = fpkt->pkt_datalen;
7445 
7446 	if ((len < FCP_LUN_HEADER) ||
7447 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7448 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7449 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7450 		fcp_icmd_free(pptr, icmd);
7451 		return;
7452 	}
7453 
7454 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7455 	    fpkt->pkt_datalen);
7456 
7457 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7458 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7459 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7460 	    pptr->port_instance, ptgt->tgt_d_id);
7461 
7462 	/*
7463 	 * Get the number of luns (which is supplied as LUNS * 8) the
7464 	 * device claims it has.
7465 	 */
7466 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7467 
7468 	/*
7469 	 * Get the maximum number of luns the buffer submitted can hold.
7470 	 */
7471 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7472 
7473 	/*
7474 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7475 	 */
7476 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7477 		kmem_free(report_lun, len);
7478 
7479 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7480 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7481 		    ptgt->tgt_d_id);
7482 
7483 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7484 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7485 		fcp_icmd_free(pptr, icmd);
7486 		return;
7487 	}
7488 
7489 	/*
7490 	 * If there are more LUNs than we have allocated memory for,
7491 	 * allocate more space and send down yet another report lun if
7492 	 * the maximum number of attempts hasn't been reached.
7493 	 */
7494 	mutex_enter(&ptgt->tgt_mutex);
7495 
7496 	if ((nluns_claimed > nluns_bufmax) &&
7497 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7498 
7499 		struct fcp_lun *plun;
7500 
7501 		ptgt->tgt_report_lun_cnt++;
7502 		plun = ptgt->tgt_lun;
7503 		ASSERT(plun != NULL);
7504 		mutex_exit(&ptgt->tgt_mutex);
7505 
7506 		kmem_free(report_lun, len);
7507 
7508 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7509 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7510 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7511 		    nluns_claimed, ptgt->tgt_d_id);
7512 
7513 		/* Retrieve the rscn count (if a valid one exists) */
7514 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7515 			rscn_count = ((fc_ulp_rscn_info_t *)
7516 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7517 			    ulp_rscn_count;
7518 		} else {
7519 			rscn_count = FC_INVALID_RSCN_COUNT;
7520 		}
7521 
7522 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7523 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7524 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7525 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7526 			(void) fcp_call_finish_init(pptr, ptgt,
7527 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7528 			    icmd->ipkt_cause);
7529 		}
7530 
7531 		fcp_icmd_free(pptr, icmd);
7532 		return;
7533 	}
7534 
7535 	if (nluns_claimed > nluns_bufmax) {
7536 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7537 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7538 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7539 		    "	 Number of LUNs lost=%x",
7540 		    ptgt->tgt_port_wwn.raw_wwn[0],
7541 		    ptgt->tgt_port_wwn.raw_wwn[1],
7542 		    ptgt->tgt_port_wwn.raw_wwn[2],
7543 		    ptgt->tgt_port_wwn.raw_wwn[3],
7544 		    ptgt->tgt_port_wwn.raw_wwn[4],
7545 		    ptgt->tgt_port_wwn.raw_wwn[5],
7546 		    ptgt->tgt_port_wwn.raw_wwn[6],
7547 		    ptgt->tgt_port_wwn.raw_wwn[7],
7548 		    nluns_claimed - nluns_bufmax);
7549 
7550 		nluns_claimed = nluns_bufmax;
7551 	}
7552 	ptgt->tgt_lun_cnt = nluns_claimed;
7553 
7554 	/*
7555 	 * Identify missing LUNs and print warning messages
7556 	 */
7557 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7558 		int offline;
7559 		int exists = 0;
7560 
7561 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7562 
7563 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7564 			uchar_t		*lun_string;
7565 
7566 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7567 
7568 			switch (lun_string[0] & 0xC0) {
7569 			case FCP_LUN_ADDRESSING:
7570 			case FCP_PD_ADDRESSING:
7571 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7572 				    lun_string[1];
7573 				if (plun->lun_num == lun_num) {
7574 					exists++;
7575 					break;
7576 				}
7577 				break;
7578 
7579 			default:
7580 				break;
7581 			}
7582 		}
7583 
7584 		if (!exists && !offline) {
7585 			mutex_exit(&ptgt->tgt_mutex);
7586 
7587 			mutex_enter(&pptr->port_mutex);
7588 			mutex_enter(&ptgt->tgt_mutex);
7589 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7590 				/*
7591 				 * set disappear flag when device was connected
7592 				 */
7593 				if (!(plun->lun_state &
7594 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7595 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7596 				}
7597 				mutex_exit(&ptgt->tgt_mutex);
7598 				mutex_exit(&pptr->port_mutex);
7599 				if (!(plun->lun_state &
7600 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7601 					fcp_log(CE_NOTE, pptr->port_dip,
7602 					    "!Lun=%x for target=%x disappeared",
7603 					    plun->lun_num, ptgt->tgt_d_id);
7604 				}
7605 				mutex_enter(&ptgt->tgt_mutex);
7606 			} else {
7607 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7608 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7609 				    "fcp_handle_reportlun,1: state change"
7610 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
7611 				mutex_exit(&ptgt->tgt_mutex);
7612 				mutex_exit(&pptr->port_mutex);
7613 				kmem_free(report_lun, len);
7614 				(void) fcp_call_finish_init(pptr, ptgt,
7615 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7616 				    icmd->ipkt_cause);
7617 				fcp_icmd_free(pptr, icmd);
7618 				return;
7619 			}
7620 		} else if (exists) {
7621 			/*
7622 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7623 			 * actually exists in REPORT_LUN response
7624 			 */
7625 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7626 				plun->lun_state &=
7627 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7628 			}
7629 			if (offline || plun->lun_num == 0) {
7630 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7631 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7632 					mutex_exit(&ptgt->tgt_mutex);
7633 					fcp_log(CE_NOTE, pptr->port_dip,
7634 					    "!Lun=%x for target=%x reappeared",
7635 					    plun->lun_num, ptgt->tgt_d_id);
7636 					mutex_enter(&ptgt->tgt_mutex);
7637 				}
7638 			}
7639 		}
7640 	}
7641 
7642 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7643 	mutex_exit(&ptgt->tgt_mutex);
7644 
7645 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7646 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7647 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7648 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7649 
7650 	/* scan each lun */
7651 	for (i = 0; i < nluns_claimed; i++) {
7652 		uchar_t	*lun_string;
7653 
7654 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7655 
7656 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7657 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7658 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7659 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7660 		    lun_string[0]);
7661 
7662 		switch (lun_string[0] & 0xC0) {
7663 		case FCP_LUN_ADDRESSING:
7664 		case FCP_PD_ADDRESSING:
7665 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7666 
7667 			/* We will skip masked LUNs because of the blacklist. */
7668 			if (fcp_lun_blacklist != NULL) {
7669 				mutex_enter(&ptgt->tgt_mutex);
7670 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7671 				    lun_num) == TRUE) {
7672 					ptgt->tgt_lun_cnt--;
7673 					mutex_exit(&ptgt->tgt_mutex);
7674 					break;
7675 				}
7676 				mutex_exit(&ptgt->tgt_mutex);
7677 			}
7678 
7679 			/* see if this LUN is already allocated */
7680 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7681 				plun = fcp_alloc_lun(ptgt);
7682 				if (plun == NULL) {
7683 					fcp_log(CE_NOTE, pptr->port_dip,
7684 					    "!Lun allocation failed"
7685 					    " target=%x lun=%x",
7686 					    ptgt->tgt_d_id, lun_num);
7687 					break;
7688 				}
7689 			}
7690 
7691 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7692 			/* convert to LUN */
7693 			plun->lun_addr.ent_addr_0 =
7694 			    BE_16(*(uint16_t *)&(lun_string[0]));
7695 			plun->lun_addr.ent_addr_1 =
7696 			    BE_16(*(uint16_t *)&(lun_string[2]));
7697 			plun->lun_addr.ent_addr_2 =
7698 			    BE_16(*(uint16_t *)&(lun_string[4]));
7699 			plun->lun_addr.ent_addr_3 =
7700 			    BE_16(*(uint16_t *)&(lun_string[6]));
7701 
7702 			plun->lun_num = lun_num;
7703 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7704 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7705 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7706 
7707 			/* Retrieve the rscn count (if a valid one exists) */
7708 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7709 				rscn_count = ((fc_ulp_rscn_info_t *)
7710 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7711 				    ulp_rscn_count;
7712 			} else {
7713 				rscn_count = FC_INVALID_RSCN_COUNT;
7714 			}
7715 
7716 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7717 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7718 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7719 				mutex_enter(&pptr->port_mutex);
7720 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7721 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7722 					fcp_log(CE_NOTE, pptr->port_dip,
7723 					    "!failed to send INQUIRY"
7724 					    " target=%x lun=%x",
7725 					    ptgt->tgt_d_id, plun->lun_num);
7726 				} else {
7727 					FCP_TRACE(fcp_logq,
7728 					    pptr->port_instbuf, fcp_trace,
7729 					    FCP_BUF_LEVEL_5, 0,
7730 					    "fcp_handle_reportlun,2: state"
7731 					    " change occured for D_ID=0x%x",
7732 					    ptgt->tgt_d_id);
7733 				}
7734 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7735 				mutex_exit(&pptr->port_mutex);
7736 			} else {
7737 				continue;
7738 			}
7739 			break;
7740 
7741 		case FCP_VOLUME_ADDRESSING:
7742 			/* FALLTHROUGH */
7743 		default:
7744 			fcp_log(CE_WARN, NULL,
7745 			    "!Unsupported LUN Addressing method %x "
7746 			    "in response to REPORT_LUN", lun_string[0]);
7747 			break;
7748 		}
7749 
7750 		/*
7751 		 * each time through this loop we should decrement
7752 		 * the tmp_cnt by one -- since we go through this loop
7753 		 * one time for each LUN, the tmp_cnt should never be <=0
7754 		 */
7755 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7756 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7757 	}
7758 
7759 	if (i == 0) {
7760 		fcp_log(CE_WARN, pptr->port_dip,
7761 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7762 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7763 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7764 	}
7765 
7766 	kmem_free(report_lun, len);
7767 	fcp_icmd_free(pptr, icmd);
7768 }
7769 
7770 
7771 /*
7772  * called internally to return a LUN given a target and a LUN number
7773  */
7774 static struct fcp_lun *
7775 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7776 {
7777 	struct fcp_lun	*plun;
7778 
7779 	mutex_enter(&ptgt->tgt_mutex);
7780 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7781 		if (plun->lun_num == lun_num) {
7782 			mutex_exit(&ptgt->tgt_mutex);
7783 			return (plun);
7784 		}
7785 	}
7786 	mutex_exit(&ptgt->tgt_mutex);
7787 
7788 	return (NULL);
7789 }
7790 
7791 
7792 /*
7793  * handle finishing one target for fcp_finish_init
7794  *
7795  * return true (non-zero) if we want finish_init to continue with the
7796  * next target
7797  *
7798  * called with the port mutex held
7799  */
7800 /*ARGSUSED*/
7801 static int
7802 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7803     int link_cnt, int tgt_cnt, int cause)
7804 {
7805 	int	rval = 1;
7806 	ASSERT(pptr != NULL);
7807 	ASSERT(ptgt != NULL);
7808 
7809 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7810 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7811 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7812 	    ptgt->tgt_state);
7813 
7814 	ASSERT(mutex_owned(&pptr->port_mutex));
7815 
7816 	if ((pptr->port_link_cnt != link_cnt) ||
7817 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7818 		/*
7819 		 * oh oh -- another link reset or target change
7820 		 * must have occurred while we are in here
7821 		 */
7822 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7823 
7824 		return (0);
7825 	} else {
7826 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7827 	}
7828 
7829 	mutex_enter(&ptgt->tgt_mutex);
7830 
7831 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7832 		/*
7833 		 * tgt is not offline -- is it marked (i.e. needs
7834 		 * to be offlined) ??
7835 		 */
7836 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7837 			/*
7838 			 * this target not offline *and*
7839 			 * marked
7840 			 */
7841 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7842 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7843 			    tgt_cnt, 0, 0);
7844 		} else {
7845 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7846 
7847 			/* create the LUNs */
7848 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7849 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7850 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7851 				    cause);
7852 				ptgt->tgt_device_created = 1;
7853 			} else {
7854 				fcp_update_tgt_state(ptgt, FCP_RESET,
7855 				    FCP_LUN_BUSY);
7856 			}
7857 		}
7858 	}
7859 
7860 	mutex_exit(&ptgt->tgt_mutex);
7861 
7862 	return (rval);
7863 }
7864 
7865 
7866 /*
7867  * this routine is called to finish port initialization
7868  *
7869  * Each port has a "temp" counter -- when a state change happens (e.g.
7870  * port online), the temp count is set to the number of devices in the map.
7871  * Then, as each device gets "discovered", the temp counter is decremented
7872  * by one.  When this count reaches zero we know that all of the devices
7873  * in the map have been discovered (or an error has occurred), so we can
7874  * then finish initialization -- which is done by this routine (well, this
 * and fcp_finish_tgt())
7876  *
7877  * acquires and releases the global mutex
7878  *
7879  * called with the port mutex owned
7880  */
7881 static void
7882 fcp_finish_init(struct fcp_port *pptr)
7883 {
7884 #ifdef	DEBUG
7885 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
7886 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
7887 	    FCP_STACK_DEPTH);
7888 #endif /* DEBUG */
7889 
7890 	ASSERT(mutex_owned(&pptr->port_mutex));
7891 
7892 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7893 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
7894 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
7895 
7896 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
7897 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
7898 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
7899 		pptr->port_state &= ~FCP_STATE_ONLINING;
7900 		pptr->port_state |= FCP_STATE_ONLINE;
7901 	}
7902 
7903 	/* Wake up threads waiting on config done */
7904 	cv_broadcast(&pptr->port_config_cv);
7905 }
7906 
7907 
7908 /*
7909  * called from fcp_finish_init to create the LUNs for a target
7910  *
7911  * called with the port mutex owned
7912  */
7913 static void
7914 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
7915 {
7916 	struct fcp_lun	*plun;
7917 	struct fcp_port	*pptr;
7918 	child_info_t		*cip = NULL;
7919 
7920 	ASSERT(ptgt != NULL);
7921 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
7922 
7923 	pptr = ptgt->tgt_port;
7924 
7925 	ASSERT(pptr != NULL);
7926 
7927 	/* scan all LUNs for this target */
7928 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7929 		if (plun->lun_state & FCP_LUN_OFFLINE) {
7930 			continue;
7931 		}
7932 
7933 		if (plun->lun_state & FCP_LUN_MARK) {
7934 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7935 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7936 			    "fcp_create_luns: offlining marked LUN!");
7937 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
7938 			continue;
7939 		}
7940 
7941 		plun->lun_state &= ~FCP_LUN_BUSY;
7942 
7943 		/*
7944 		 * There are conditions in which FCP_LUN_INIT flag is cleared
7945 		 * but we have a valid plun->lun_cip. To cover this case also
7946 		 * CLEAR_BUSY whenever we have a valid lun_cip.
7947 		 */
7948 		if (plun->lun_mpxio && plun->lun_cip &&
7949 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
7950 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
7951 		    0, 0))) {
7952 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7953 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7954 			    "fcp_create_luns: enable lun %p failed!",
7955 			    plun);
7956 		}
7957 
7958 		if (plun->lun_state & FCP_LUN_INIT &&
7959 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
7960 			continue;
7961 		}
7962 
7963 		if (cause == FCP_CAUSE_USER_CREATE) {
7964 			continue;
7965 		}
7966 
7967 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7968 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
7969 		    "create_luns: passing ONLINE elem to HP thread");
7970 
7971 		/*
7972 		 * If lun has changed, prepare for offlining the old path.
7973 		 * Do not offline the old path right now, since it may be
7974 		 * still opened.
7975 		 */
7976 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
7977 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
7978 		}
7979 
7980 		/* pass an ONLINE element to the hotplug thread */
7981 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7982 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
7983 
7984 			/*
7985 			 * We can not synchronous attach (i.e pass
7986 			 * NDI_ONLINE_ATTACH) here as we might be
7987 			 * coming from an interrupt or callback
7988 			 * thread.
7989 			 */
7990 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7991 			    link_cnt, tgt_cnt, 0, 0)) {
7992 				fcp_log(CE_CONT, pptr->port_dip,
7993 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
7994 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
7995 			}
7996 		}
7997 	}
7998 }
7999 
8000 
8001 /*
8002  * function to online/offline devices
8003  */
8004 static int
8005 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8006     int online, int lcount, int tcount, int flags)
8007 {
8008 	int			rval = NDI_FAILURE;
8009 	int			circ;
8010 	child_info_t		*ccip;
8011 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8012 	int			is_mpxio = pptr->port_mpxio;
	dev_info_t		*cdip = NULL, *pdip;
8014 	char			*devname;
8015 
8016 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8017 		/*
8018 		 * When this event gets serviced, lun_cip and lun_mpxio
8019 		 * has changed, so it should be invalidated now.
8020 		 */
8021 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8022 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8023 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8024 		return (rval);
8025 	}
8026 
8027 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8028 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8029 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8030 	    "flags=%x mpxio=%x\n",
8031 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8032 	    plun->lun_mpxio);
8033 
8034 	/*
8035 	 * lun_mpxio needs checking here because we can end up in a race
8036 	 * condition where this task has been dispatched while lun_mpxio is
8037 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8038 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8039 	 * the flag. We rely on the serialization of the tasks here. We return
8040 	 * NDI_SUCCESS so any callers continue without reporting spurious
8041 	 * errors, and the still think we're an MPXIO LUN.
8042 	 */
8043 
8044 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8045 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8046 		if (plun->lun_mpxio) {
8047 			rval = fcp_update_mpxio_path(plun, cip, online);
8048 		} else {
8049 			rval = NDI_SUCCESS;
8050 		}
8051 		return (rval);
8052 	}
8053 
8054 	/*
8055 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8056 	 * executing devfs_clean() if parent lock is held.
8057 	 */
8058 	ASSERT(!servicing_interrupt());
8059 	if (online == FCP_OFFLINE) {
8060 		if (plun->lun_mpxio == 0) {
8061 			if (plun->lun_cip == cip) {
8062 				cdip = DIP(plun->lun_cip);
8063 			} else {
8064 				cdip = DIP(cip);
8065 			}
8066 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8067 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8068 		} else if ((plun->lun_cip != cip) && cip) {
8069 			/*
8070 			 * This means a DTYPE/GUID change, we shall get the
8071 			 * dip of the old cip instead of the current lun_cip.
8072 			 */
8073 			cdip = mdi_pi_get_client(PIP(cip));
8074 		}
8075 		if (cdip) {
8076 			if (i_ddi_devi_attached(cdip)) {
8077 				pdip = ddi_get_parent(cdip);
8078 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8079 				ndi_devi_enter(pdip, &circ);
8080 				(void) ddi_deviname(cdip, devname);
8081 				ndi_devi_exit(pdip, circ);
8082 				/*
8083 				 * Release parent lock before calling
8084 				 * devfs_clean().
8085 				 */
8086 				rval = devfs_clean(pdip, devname + 1,
8087 				    DV_CLEAN_FORCE);
8088 				kmem_free(devname, MAXNAMELEN + 1);
8089 				/*
8090 				 * Return if devfs_clean() fails for
8091 				 * non-MPXIO case.
8092 				 * For MPXIO case, another path could be
8093 				 * offlined.
8094 				 */
8095 				if (rval && plun->lun_mpxio == 0) {
8096 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8097 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8098 					    "fcp_trigger_lun: devfs_clean "
8099 					    "failed rval=%x  dip=%p",
8100 					    rval, pdip);
8101 					return (NDI_FAILURE);
8102 				}
8103 			}
8104 		}
8105 	}
8106 
8107 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8108 		return (NDI_FAILURE);
8109 	}
8110 
8111 	if (is_mpxio) {
8112 		mdi_devi_enter(pptr->port_dip, &circ);
8113 	} else {
8114 		ndi_devi_enter(pptr->port_dip, &circ);
8115 	}
8116 
8117 	mutex_enter(&pptr->port_mutex);
8118 	mutex_enter(&plun->lun_mutex);
8119 
8120 	if (online == FCP_ONLINE) {
8121 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8122 		if (ccip == NULL) {
8123 			goto fail;
8124 		}
8125 	} else {
8126 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8127 			goto fail;
8128 		}
8129 		ccip = cip;
8130 	}
8131 
8132 	if (online == FCP_ONLINE) {
8133 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8134 		    &circ);
8135 		fc_ulp_log_device_event(pptr->port_fp_handle,
8136 		    FC_ULP_DEVICE_ONLINE);
8137 	} else {
8138 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8139 		    &circ);
8140 		fc_ulp_log_device_event(pptr->port_fp_handle,
8141 		    FC_ULP_DEVICE_OFFLINE);
8142 	}
8143 
8144 fail:	mutex_exit(&plun->lun_mutex);
8145 	mutex_exit(&pptr->port_mutex);
8146 
8147 	if (is_mpxio) {
8148 		mdi_devi_exit(pptr->port_dip, circ);
8149 	} else {
8150 		ndi_devi_exit(pptr->port_dip, circ);
8151 	}
8152 
8153 	fc_ulp_idle_port(pptr->port_fp_handle);
8154 
8155 	return (rval);
8156 }
8157 
8158 
8159 /*
8160  * take a target offline by taking all of its LUNs offline
8161  */
8162 /*ARGSUSED*/
8163 static int
8164 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8165     int link_cnt, int tgt_cnt, int nowait, int flags)
8166 {
8167 	struct fcp_tgt_elem	*elem;
8168 
8169 	ASSERT(mutex_owned(&pptr->port_mutex));
8170 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8171 
8172 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8173 
8174 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8175 	    ptgt->tgt_change_cnt)) {
8176 		mutex_exit(&ptgt->tgt_mutex);
8177 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8178 		mutex_enter(&ptgt->tgt_mutex);
8179 
8180 		return (0);
8181 	}
8182 
8183 	ptgt->tgt_pd_handle = NULL;
8184 	mutex_exit(&ptgt->tgt_mutex);
8185 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8186 	mutex_enter(&ptgt->tgt_mutex);
8187 
8188 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8189 
8190 	if (ptgt->tgt_tcap &&
8191 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8192 		elem->flags = flags;
8193 		elem->time = fcp_watchdog_time;
8194 		if (nowait == 0) {
8195 			elem->time += fcp_offline_delay;
8196 		}
8197 		elem->ptgt = ptgt;
8198 		elem->link_cnt = link_cnt;
8199 		elem->tgt_cnt = tgt_cnt;
8200 		elem->next = pptr->port_offline_tgts;
8201 		pptr->port_offline_tgts = elem;
8202 	} else {
8203 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8204 	}
8205 
8206 	return (1);
8207 }
8208 
8209 
8210 static void
8211 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8212     int link_cnt, int tgt_cnt, int flags)
8213 {
8214 	ASSERT(mutex_owned(&pptr->port_mutex));
8215 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8216 
8217 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8218 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8219 	ptgt->tgt_pd_handle = NULL;
8220 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8221 }
8222 
8223 
8224 static void
8225 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8226     int flags)
8227 {
8228 	struct	fcp_lun	*plun;
8229 
8230 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8231 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8232 
8233 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8234 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8235 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8236 		}
8237 	}
8238 }
8239 
8240 
8241 /*
8242  * take a LUN offline
8243  *
8244  * enters and leaves with the target mutex held, releasing it in the process
8245  *
8246  * allocates memory in non-sleep mode
8247  */
8248 static void
8249 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8250     int nowait, int flags)
8251 {
8252 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8253 	struct fcp_lun_elem	*elem;
8254 
8255 	ASSERT(plun != NULL);
8256 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8257 
8258 	if (nowait) {
8259 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8260 		return;
8261 	}
8262 
8263 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8264 		elem->flags = flags;
8265 		elem->time = fcp_watchdog_time;
8266 		if (nowait == 0) {
8267 			elem->time += fcp_offline_delay;
8268 		}
8269 		elem->plun = plun;
8270 		elem->link_cnt = link_cnt;
8271 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8272 		elem->next = pptr->port_offline_luns;
8273 		pptr->port_offline_luns = elem;
8274 	} else {
8275 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8276 	}
8277 }
8278 
8279 
8280 static void
8281 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8282 {
8283 	struct fcp_pkt	*head = NULL;
8284 
8285 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8286 
8287 	mutex_exit(&LUN_TGT->tgt_mutex);
8288 
8289 	head = fcp_scan_commands(plun);
8290 	if (head != NULL) {
8291 		fcp_abort_commands(head, LUN_PORT);
8292 	}
8293 
8294 	mutex_enter(&LUN_TGT->tgt_mutex);
8295 
8296 	if (plun->lun_cip && plun->lun_mpxio) {
8297 		/*
8298 		 * Intimate MPxIO lun busy is cleared
8299 		 */
8300 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8301 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8302 		    0, 0)) {
8303 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8304 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8305 			    LUN_TGT->tgt_d_id, plun->lun_num);
8306 		}
8307 		/*
8308 		 * Intimate MPxIO that the lun is now marked for offline
8309 		 */
8310 		mutex_exit(&LUN_TGT->tgt_mutex);
8311 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8312 		mutex_enter(&LUN_TGT->tgt_mutex);
8313 	}
8314 }
8315 
8316 static void
8317 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8318     int flags)
8319 {
8320 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8321 
8322 	mutex_exit(&LUN_TGT->tgt_mutex);
8323 	fcp_update_offline_flags(plun);
8324 	mutex_enter(&LUN_TGT->tgt_mutex);
8325 
8326 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8327 
8328 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8329 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8330 	    "offline_lun: passing OFFLINE elem to HP thread");
8331 
8332 	if (plun->lun_cip) {
8333 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8334 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8335 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8336 		    LUN_TGT->tgt_trace);
8337 
8338 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8339 		    link_cnt, tgt_cnt, flags, 0)) {
8340 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8341 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8342 			    LUN_TGT->tgt_d_id, plun->lun_num);
8343 		}
8344 	}
8345 }
8346 
8347 static void
8348 fcp_scan_offline_luns(struct fcp_port *pptr)
8349 {
8350 	struct fcp_lun_elem	*elem;
8351 	struct fcp_lun_elem	*prev;
8352 	struct fcp_lun_elem	*next;
8353 
8354 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8355 
8356 	prev = NULL;
8357 	elem = pptr->port_offline_luns;
8358 	while (elem) {
8359 		next = elem->next;
8360 		if (elem->time <= fcp_watchdog_time) {
8361 			int			changed = 1;
8362 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8363 
8364 			mutex_enter(&ptgt->tgt_mutex);
8365 			if (pptr->port_link_cnt == elem->link_cnt &&
8366 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8367 				changed = 0;
8368 			}
8369 
8370 			if (!changed &&
			    !(elem->plun->lun_state & FCP_LUN_OFFLINE)) {
8372 				fcp_offline_lun_now(elem->plun,
8373 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8374 			}
8375 			mutex_exit(&ptgt->tgt_mutex);
8376 
8377 			kmem_free(elem, sizeof (*elem));
8378 
8379 			if (prev) {
8380 				prev->next = next;
8381 			} else {
8382 				pptr->port_offline_luns = next;
8383 			}
8384 		} else {
8385 			prev = elem;
8386 		}
8387 		elem = next;
8388 	}
8389 }
8390 
8391 
8392 static void
8393 fcp_scan_offline_tgts(struct fcp_port *pptr)
8394 {
8395 	struct fcp_tgt_elem	*elem;
8396 	struct fcp_tgt_elem	*prev;
8397 	struct fcp_tgt_elem	*next;
8398 
8399 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8400 
8401 	prev = NULL;
8402 	elem = pptr->port_offline_tgts;
8403 	while (elem) {
8404 		next = elem->next;
8405 		if (elem->time <= fcp_watchdog_time) {
8406 			int			changed = 1;
8407 			struct fcp_tgt	*ptgt = elem->ptgt;
8408 
8409 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8410 				changed = 0;
8411 			}
8412 
8413 			mutex_enter(&ptgt->tgt_mutex);
8414 			if (!changed && !(ptgt->tgt_state &
8415 			    FCP_TGT_OFFLINE)) {
8416 				fcp_offline_target_now(pptr,
8417 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8418 				    elem->flags);
8419 			}
8420 			mutex_exit(&ptgt->tgt_mutex);
8421 
8422 			kmem_free(elem, sizeof (*elem));
8423 
8424 			if (prev) {
8425 				prev->next = next;
8426 			} else {
8427 				pptr->port_offline_tgts = next;
8428 			}
8429 		} else {
8430 			prev = elem;
8431 		}
8432 		elem = next;
8433 	}
8434 }
8435 
8436 
8437 static void
8438 fcp_update_offline_flags(struct fcp_lun *plun)
8439 {
8440 	struct fcp_port	*pptr = LUN_PORT;
8441 	ASSERT(plun != NULL);
8442 
8443 	mutex_enter(&LUN_TGT->tgt_mutex);
8444 	plun->lun_state |= FCP_LUN_OFFLINE;
8445 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8446 
8447 	mutex_enter(&plun->lun_mutex);
8448 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8449 		dev_info_t *cdip = NULL;
8450 
8451 		mutex_exit(&LUN_TGT->tgt_mutex);
8452 
8453 		if (plun->lun_mpxio == 0) {
8454 			cdip = DIP(plun->lun_cip);
8455 		} else if (plun->lun_cip) {
8456 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8457 		}
8458 
8459 		mutex_exit(&plun->lun_mutex);
8460 		if (cdip) {
8461 			(void) ndi_event_retrieve_cookie(
8462 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8463 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8464 			(void) ndi_event_run_callbacks(
8465 			    pptr->port_ndi_event_hdl, cdip,
8466 			    fcp_remove_eid, NULL);
8467 		}
8468 	} else {
8469 		mutex_exit(&plun->lun_mutex);
8470 		mutex_exit(&LUN_TGT->tgt_mutex);
8471 	}
8472 }
8473 
8474 
8475 /*
8476  * Scan all of the command pkts for this port, moving pkts that
8477  * match our LUN onto our own list (headed by "head")
8478  */
8479 static struct fcp_pkt *
8480 fcp_scan_commands(struct fcp_lun *plun)
8481 {
8482 	struct fcp_port	*pptr = LUN_PORT;
8483 
8484 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8485 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8486 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8487 
8488 	struct fcp_pkt	*head = NULL;	/* head of our list */
8489 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8490 
8491 	int			cmds_found = 0;
8492 
8493 	mutex_enter(&pptr->port_pkt_mutex);
8494 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8495 		struct fcp_lun *tlun =
8496 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8497 
8498 		ncmd = cmd->cmd_next;	/* set next command */
8499 
8500 		/*
8501 		 * if this pkt is for a different LUN  or the
8502 		 * command is sent down, skip it.
8503 		 */
8504 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8505 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8506 			pcmd = cmd;
8507 			continue;
8508 		}
8509 		cmds_found++;
8510 		if (pcmd != NULL) {
8511 			ASSERT(pptr->port_pkt_head != cmd);
8512 			pcmd->cmd_next = cmd->cmd_next;
8513 		} else {
8514 			ASSERT(cmd == pptr->port_pkt_head);
8515 			pptr->port_pkt_head = cmd->cmd_next;
8516 		}
8517 
8518 		if (cmd == pptr->port_pkt_tail) {
8519 			pptr->port_pkt_tail = pcmd;
8520 			if (pcmd) {
8521 				pcmd->cmd_next = NULL;
8522 			}
8523 		}
8524 
8525 		if (head == NULL) {
8526 			head = tail = cmd;
8527 		} else {
8528 			ASSERT(tail != NULL);
8529 
8530 			tail->cmd_next = cmd;
8531 			tail = cmd;
8532 		}
8533 		cmd->cmd_next = NULL;
8534 	}
8535 	mutex_exit(&pptr->port_pkt_mutex);
8536 
8537 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8538 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8539 	    "scan commands: %d cmd(s) found", cmds_found);
8540 
8541 	return (head);
8542 }
8543 
8544 
8545 /*
8546  * Abort all the commands in the command queue
8547  */
8548 static void
8549 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8550 {
8551 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8552 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8553 
8554 	ASSERT(mutex_owned(&pptr->port_mutex));
8555 
	/* scan through the pkts and invalidate them */
8557 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8558 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8559 
8560 		ncmd = cmd->cmd_next;
8561 		ASSERT(pkt != NULL);
8562 
8563 		/*
8564 		 * The lun is going to be marked offline. Indicate
8565 		 * the target driver not to requeue or retry this command
8566 		 * as the device is going to be offlined pretty soon.
8567 		 */
8568 		pkt->pkt_reason = CMD_DEV_GONE;
8569 		pkt->pkt_statistics = 0;
8570 		pkt->pkt_state = 0;
8571 
8572 		/* reset cmd flags/state */
8573 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8574 		cmd->cmd_state = FCP_PKT_IDLE;
8575 
8576 		/*
8577 		 * ensure we have a packet completion routine,
8578 		 * then call it.
8579 		 */
8580 		ASSERT(pkt->pkt_comp != NULL);
8581 
8582 		mutex_exit(&pptr->port_mutex);
8583 		fcp_post_callback(cmd);
8584 		mutex_enter(&pptr->port_mutex);
8585 	}
8586 }
8587 
8588 
8589 /*
8590  * the pkt_comp callback for command packets
8591  */
8592 static void
8593 fcp_cmd_callback(fc_packet_t *fpkt)
8594 {
8595 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8596 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8597 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8598 
8599 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8600 
8601 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8602 		cmn_err(CE_PANIC, "Packet already completed %p",
8603 		    (void *)cmd);
8604 	}
8605 
8606 	/*
8607 	 * Watch thread should be freeing the packet, ignore the pkt.
8608 	 */
8609 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8610 		fcp_log(CE_CONT, pptr->port_dip,
8611 		    "!FCP: Pkt completed while aborting\n");
8612 		return;
8613 	}
8614 	cmd->cmd_state = FCP_PKT_IDLE;
8615 
8616 	fcp_complete_pkt(fpkt);
8617 
8618 #ifdef	DEBUG
8619 	mutex_enter(&pptr->port_pkt_mutex);
8620 	pptr->port_npkts--;
8621 	mutex_exit(&pptr->port_pkt_mutex);
8622 #endif /* DEBUG */
8623 
8624 	fcp_post_callback(cmd);
8625 }
8626 
8627 
8628 static void
8629 fcp_complete_pkt(fc_packet_t *fpkt)
8630 {
8631 	int			error = 0;
8632 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8633 	    fpkt->pkt_ulp_private;
8634 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8635 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8636 	struct fcp_lun	*plun;
8637 	struct fcp_tgt	*ptgt;
8638 	struct fcp_rsp		*rsp;
8639 	struct scsi_address	save;
8640 
8641 #ifdef	DEBUG
8642 	save = pkt->pkt_address;
8643 #endif /* DEBUG */
8644 
8645 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8646 
8647 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8648 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8649 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8650 			    sizeof (struct fcp_rsp));
8651 		}
8652 
8653 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8654 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8655 
8656 		pkt->pkt_resid = 0;
8657 
8658 		if (cmd->cmd_pkt->pkt_numcookies) {
8659 			pkt->pkt_state |= STATE_XFERRED_DATA;
8660 			if (fpkt->pkt_data_resid) {
8661 				error++;
8662 			}
8663 		}
8664 
8665 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8666 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8667 			/*
8668 			 * The next two checks make sure that if there
8669 			 * is no sense data or a valid response and
8670 			 * the command came back with check condition,
8671 			 * the command should be retried.
8672 			 */
8673 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8674 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8675 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8676 				pkt->pkt_resid = cmd->cmd_dmacount;
8677 			}
8678 		}
8679 
8680 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8681 			return;
8682 		}
8683 
8684 		plun = ADDR2LUN(&pkt->pkt_address);
8685 		ptgt = plun->lun_tgt;
8686 		ASSERT(ptgt != NULL);
8687 
8688 		/*
8689 		 * Update the transfer resid, if appropriate
8690 		 */
8691 		if (rsp->fcp_u.fcp_status.resid_over ||
8692 		    rsp->fcp_u.fcp_status.resid_under) {
8693 			pkt->pkt_resid = rsp->fcp_resid;
8694 		}
8695 
8696 		/*
8697 		 * First see if we got a FCP protocol error.
8698 		 */
8699 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8700 			struct fcp_rsp_info	*bep;
8701 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8702 			    sizeof (struct fcp_rsp));
8703 
8704 			if (fcp_validate_fcp_response(rsp, pptr) !=
8705 			    FC_SUCCESS) {
8706 				pkt->pkt_reason = CMD_CMPLT;
8707 				*(pkt->pkt_scbp) = STATUS_CHECK;
8708 
8709 				fcp_log(CE_WARN, pptr->port_dip,
8710 				    "!SCSI command to d_id=0x%x lun=0x%x"
8711 				    " failed, Bad FCP response values:"
8712 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8713 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8714 				    ptgt->tgt_d_id, plun->lun_num,
8715 				    rsp->reserved_0, rsp->reserved_1,
8716 				    rsp->fcp_u.fcp_status.reserved_0,
8717 				    rsp->fcp_u.fcp_status.reserved_1,
8718 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8719 
8720 				return;
8721 			}
8722 
8723 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8724 				FCP_CP_IN(fpkt->pkt_resp +
8725 				    sizeof (struct fcp_rsp), bep,
8726 				    fpkt->pkt_resp_acc,
8727 				    sizeof (struct fcp_rsp_info));
8728 			}
8729 
8730 			if (bep->rsp_code != FCP_NO_FAILURE) {
8731 				child_info_t	*cip;
8732 
8733 				pkt->pkt_reason = CMD_TRAN_ERR;
8734 
8735 				mutex_enter(&plun->lun_mutex);
8736 				cip = plun->lun_cip;
8737 				mutex_exit(&plun->lun_mutex);
8738 
8739 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8740 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8741 				    "FCP response error on cmd=%p"
8742 				    " target=0x%x, cip=%p", cmd,
8743 				    ptgt->tgt_d_id, cip);
8744 			}
8745 		}
8746 
8747 		/*
8748 		 * See if we got a SCSI error with sense data
8749 		 */
8750 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8751 			uchar_t				rqlen;
8752 			caddr_t				sense_from;
8753 			child_info_t			*cip;
8754 			timeout_id_t			tid;
8755 			struct scsi_arq_status		*arq;
8756 			struct scsi_extended_sense	*sense_to;
8757 
8758 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8759 			sense_to = &arq->sts_sensedata;
8760 
8761 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8762 			    sizeof (struct scsi_extended_sense));
8763 
8764 			sense_from = (caddr_t)fpkt->pkt_resp +
8765 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8766 
8767 			if (fcp_validate_fcp_response(rsp, pptr) !=
8768 			    FC_SUCCESS) {
8769 				pkt->pkt_reason = CMD_CMPLT;
8770 				*(pkt->pkt_scbp) = STATUS_CHECK;
8771 
8772 				fcp_log(CE_WARN, pptr->port_dip,
8773 				    "!SCSI command to d_id=0x%x lun=0x%x"
8774 				    " failed, Bad FCP response values:"
8775 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8776 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8777 				    ptgt->tgt_d_id, plun->lun_num,
8778 				    rsp->reserved_0, rsp->reserved_1,
8779 				    rsp->fcp_u.fcp_status.reserved_0,
8780 				    rsp->fcp_u.fcp_status.reserved_1,
8781 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8782 
8783 				return;
8784 			}
8785 
8786 			/*
8787 			 * copy in sense information
8788 			 */
8789 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8790 				FCP_CP_IN(sense_from, sense_to,
8791 				    fpkt->pkt_resp_acc, rqlen);
8792 			} else {
8793 				bcopy(sense_from, sense_to, rqlen);
8794 			}
8795 
8796 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8797 			    (FCP_SENSE_NO_LUN(sense_to))) {
8798 				mutex_enter(&ptgt->tgt_mutex);
8799 				if (ptgt->tgt_tid == NULL) {
8800 					/*
8801 					 * Kick off rediscovery
8802 					 */
8803 					tid = timeout(fcp_reconfigure_luns,
8804 					    (caddr_t)ptgt, drv_usectohz(1));
8805 
8806 					ptgt->tgt_tid = tid;
8807 					ptgt->tgt_state |= FCP_TGT_BUSY;
8808 				}
8809 				mutex_exit(&ptgt->tgt_mutex);
8810 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8811 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8812 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8813 					    "!FCP: Report Lun Has Changed"
8814 					    " target=%x", ptgt->tgt_d_id);
8815 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8816 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8817 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8818 					    "!FCP: LU Not Supported"
8819 					    " target=%x", ptgt->tgt_d_id);
8820 				}
8821 			}
8822 			ASSERT(pkt->pkt_scbp != NULL);
8823 
8824 			pkt->pkt_state |= STATE_ARQ_DONE;
8825 
8826 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8827 
8828 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8829 			arq->sts_rqpkt_reason = 0;
8830 			arq->sts_rqpkt_statistics = 0;
8831 
8832 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8833 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8834 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8835 			    STATE_XFERRED_DATA;
8836 
8837 			mutex_enter(&plun->lun_mutex);
8838 			cip = plun->lun_cip;
8839 			mutex_exit(&plun->lun_mutex);
8840 
8841 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8842 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8843 			    "SCSI Check condition on cmd=%p target=0x%x"
8844 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8845 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8846 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
8847 			    rsp->fcp_u.fcp_status.scsi_status,
8848 			    sense_to->es_key, sense_to->es_add_code,
8849 			    sense_to->es_qual_code);
8850 		}
8851 	} else {
8852 		plun = ADDR2LUN(&pkt->pkt_address);
8853 		ptgt = plun->lun_tgt;
8854 		ASSERT(ptgt != NULL);
8855 
8856 		/*
8857 		 * Work harder to translate errors into target driver
8858 		 * understandable ones. Note with despair that the target
8859 		 * drivers don't decode pkt_state and pkt_reason exhaustively
8860 		 * They resort to using the big hammer most often, which
8861 		 * may not get fixed in the life time of this driver.
8862 		 */
8863 		pkt->pkt_state = 0;
8864 		pkt->pkt_statistics = 0;
8865 
8866 		switch (fpkt->pkt_state) {
8867 		case FC_PKT_TRAN_ERROR:
8868 			switch (fpkt->pkt_reason) {
8869 			case FC_REASON_OVERRUN:
8870 				pkt->pkt_reason = CMD_CMD_OVR;
8871 				pkt->pkt_statistics |= STAT_ABORTED;
8872 				break;
8873 
8874 			case FC_REASON_XCHG_BSY: {
8875 				caddr_t ptr;
8876 
8877 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8878 
8879 				ptr = (caddr_t)pkt->pkt_scbp;
8880 				if (ptr) {
8881 					*ptr = STATUS_BUSY;
8882 				}
8883 				break;
8884 			}
8885 
8886 			case FC_REASON_ABORTED:
8887 				pkt->pkt_reason = CMD_TRAN_ERR;
8888 				pkt->pkt_statistics |= STAT_ABORTED;
8889 				break;
8890 
8891 			case FC_REASON_ABORT_FAILED:
8892 				pkt->pkt_reason = CMD_ABORT_FAIL;
8893 				break;
8894 
8895 			case FC_REASON_NO_SEQ_INIT:
8896 			case FC_REASON_CRC_ERROR:
8897 				pkt->pkt_reason = CMD_TRAN_ERR;
8898 				pkt->pkt_statistics |= STAT_ABORTED;
8899 				break;
8900 			default:
8901 				pkt->pkt_reason = CMD_TRAN_ERR;
8902 				break;
8903 			}
8904 			break;
8905 
8906 		case FC_PKT_PORT_OFFLINE: {
8907 			dev_info_t	*cdip = NULL;
8908 			caddr_t		ptr;
8909 
8910 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
8911 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8912 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
8913 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
8914 				    ptgt->tgt_d_id);
8915 			}
8916 
8917 			mutex_enter(&plun->lun_mutex);
8918 			if (plun->lun_mpxio == 0) {
8919 				cdip = DIP(plun->lun_cip);
8920 			} else if (plun->lun_cip) {
8921 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8922 			}
8923 
8924 			mutex_exit(&plun->lun_mutex);
8925 
8926 			if (cdip) {
8927 				(void) ndi_event_retrieve_cookie(
8928 				    pptr->port_ndi_event_hdl, cdip,
8929 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
8930 				    NDI_EVENT_NOPASS);
8931 				(void) ndi_event_run_callbacks(
8932 				    pptr->port_ndi_event_hdl, cdip,
8933 				    fcp_remove_eid, NULL);
8934 			}
8935 
8936 			/*
8937 			 * If the link goes off-line for a lip,
8938 			 * this will cause a error to the ST SG
8939 			 * SGEN drivers. By setting BUSY we will
8940 			 * give the drivers the chance to retry
8941 			 * before it blows of the job. ST will
8942 			 * remember how many times it has retried.
8943 			 */
8944 
8945 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
8946 			    (plun->lun_type == DTYPE_CHANGER)) {
8947 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8948 				ptr = (caddr_t)pkt->pkt_scbp;
8949 				if (ptr) {
8950 					*ptr = STATUS_BUSY;
8951 				}
8952 			} else {
8953 				pkt->pkt_reason = CMD_TRAN_ERR;
8954 				pkt->pkt_statistics |= STAT_BUS_RESET;
8955 			}
8956 			break;
8957 		}
8958 
8959 		case FC_PKT_TRAN_BSY:
8960 			/*
8961 			 * Use the ssd Qfull handling here.
8962 			 */
8963 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
8964 			pkt->pkt_state = STATE_GOT_BUS;
8965 			break;
8966 
8967 		case FC_PKT_TIMEOUT:
8968 			pkt->pkt_reason = CMD_TIMEOUT;
8969 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
8970 				pkt->pkt_statistics |= STAT_TIMEOUT;
8971 			} else {
8972 				pkt->pkt_statistics |= STAT_ABORTED;
8973 			}
8974 			break;
8975 
8976 		case FC_PKT_LOCAL_RJT:
8977 			switch (fpkt->pkt_reason) {
8978 			case FC_REASON_OFFLINE: {
8979 				dev_info_t	*cdip = NULL;
8980 
8981 				mutex_enter(&plun->lun_mutex);
8982 				if (plun->lun_mpxio == 0) {
8983 					cdip = DIP(plun->lun_cip);
8984 				} else if (plun->lun_cip) {
8985 					cdip = mdi_pi_get_client(
8986 					    PIP(plun->lun_cip));
8987 				}
8988 				mutex_exit(&plun->lun_mutex);
8989 
8990 				if (cdip) {
8991 					(void) ndi_event_retrieve_cookie(
8992 					    pptr->port_ndi_event_hdl, cdip,
8993 					    FCAL_REMOVE_EVENT,
8994 					    &fcp_remove_eid,
8995 					    NDI_EVENT_NOPASS);
8996 					(void) ndi_event_run_callbacks(
8997 					    pptr->port_ndi_event_hdl,
8998 					    cdip, fcp_remove_eid, NULL);
8999 				}
9000 
9001 				pkt->pkt_reason = CMD_TRAN_ERR;
9002 				pkt->pkt_statistics |= STAT_BUS_RESET;
9003 
9004 				break;
9005 			}
9006 
9007 			case FC_REASON_NOMEM:
9008 			case FC_REASON_QFULL: {
9009 				caddr_t ptr;
9010 
9011 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9012 				ptr = (caddr_t)pkt->pkt_scbp;
9013 				if (ptr) {
9014 					*ptr = STATUS_BUSY;
9015 				}
9016 				break;
9017 			}
9018 
9019 			case FC_REASON_DMA_ERROR:
9020 				pkt->pkt_reason = CMD_DMA_DERR;
9021 				pkt->pkt_statistics |= STAT_ABORTED;
9022 				break;
9023 
9024 			case FC_REASON_CRC_ERROR:
9025 			case FC_REASON_UNDERRUN: {
9026 				uchar_t		status;
				/*
				 * Workaround for Bugid: 4240945.
				 * The IB on the A5K doesn't set the underrun
				 * bit in the FCP status when it transfers
				 * less than the requested amount of data.
				 * Work around the ses problem to keep luxadm
				 * happy until the IB firmware is fixed.
				 */
9035 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9036 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9037 					    fpkt->pkt_resp_acc,
9038 					    sizeof (struct fcp_rsp));
9039 				}
9040 				status = rsp->fcp_u.fcp_status.scsi_status;
9041 				if (((plun->lun_type & DTYPE_MASK) ==
9042 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9043 					pkt->pkt_reason = CMD_CMPLT;
9044 					*pkt->pkt_scbp = status;
9045 					pkt->pkt_resid = 0;
9046 				} else {
9047 					pkt->pkt_reason = CMD_TRAN_ERR;
9048 					pkt->pkt_statistics |= STAT_ABORTED;
9049 				}
9050 				break;
9051 			}
9052 
9053 			case FC_REASON_NO_CONNECTION:
9054 			case FC_REASON_UNSUPPORTED:
9055 			case FC_REASON_ILLEGAL_REQ:
9056 			case FC_REASON_BAD_SID:
9057 			case FC_REASON_DIAG_BUSY:
9058 			case FC_REASON_FCAL_OPN_FAIL:
9059 			case FC_REASON_BAD_XID:
9060 			default:
9061 				pkt->pkt_reason = CMD_TRAN_ERR;
9062 				pkt->pkt_statistics |= STAT_ABORTED;
9063 				break;
9064 
9065 			}
9066 			break;
9067 
9068 		case FC_PKT_NPORT_RJT:
9069 		case FC_PKT_FABRIC_RJT:
9070 		case FC_PKT_NPORT_BSY:
9071 		case FC_PKT_FABRIC_BSY:
9072 		default:
9073 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9074 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9075 			    "FC Status 0x%x, reason 0x%x",
9076 			    fpkt->pkt_state, fpkt->pkt_reason);
9077 			pkt->pkt_reason = CMD_TRAN_ERR;
9078 			pkt->pkt_statistics |= STAT_ABORTED;
9079 			break;
9080 		}
9081 
9082 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9083 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9084 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9085 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9086 		    fpkt->pkt_reason);
9087 	}
9088 
9089 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9090 }
9091 
9092 
9093 static int
9094 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9095 {
9096 	if (rsp->reserved_0 || rsp->reserved_1 ||
9097 	    rsp->fcp_u.fcp_status.reserved_0 ||
9098 	    rsp->fcp_u.fcp_status.reserved_1) {
		/*
		 * These reserved fields should ideally be zero.  FCP-2 does
		 * say that the recipient need not check the reserved fields
		 * for zero.  If they are not zero, we will not make a fuss
		 * about it - we just log it (to both the trace buffer and
		 * the messages file in debug kernels, to the trace buffer
		 * only otherwise) and move on.
		 *
		 * Non-zero reserved fields were seen with minnows.
		 *
		 * qlc takes care of some of this but we cannot assume that
		 * all FCAs will do so.
		 */
9111 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9112 		    FCP_BUF_LEVEL_5, 0,
9113 		    "Got fcp response packet with non-zero reserved fields "
9114 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9115 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9116 		    rsp->reserved_0, rsp->reserved_1,
9117 		    rsp->fcp_u.fcp_status.reserved_0,
9118 		    rsp->fcp_u.fcp_status.reserved_1);
9119 	}
9120 
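	/*
	 * Sanity check the response and sense data lengths advertised in
	 * the FCP_RSP IU: together with the fixed fcp_rsp header they must
	 * fit within FCP_MAX_RSP_IU_SIZE, otherwise the response is
	 * rejected as malformed.
	 */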
9121 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9122 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9123 		return (FC_FAILURE);
9124 	}
9125 
9126 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9127 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9128 	    sizeof (struct fcp_rsp))) {
9129 		return (FC_FAILURE);
9130 	}
9131 
9132 	return (FC_SUCCESS);
9133 }
9134 
9135 
/*
 * This is called when there is a change in the device state.  The case we
 * handle here is: if the d_id does not match, offline this tgt and online
 * a new tgt with the new d_id.  Called from fcp_handle_devices with
 * port_mutex held.
 */
9142 static int
9143 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9144     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9145 {
9146 	ASSERT(mutex_owned(&pptr->port_mutex));
9147 
9148 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9149 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9150 	    "Starting fcp_device_changed...");
9151 
	/*
	 * The two cases in which fcp_device_changed() is called are when
	 * the target changes its d_id or its hard address.
	 */
9156 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9157 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9158 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9159 
9160 		/* offline this target */
9161 		mutex_enter(&ptgt->tgt_mutex);
9162 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9163 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9164 			    0, 1, NDI_DEVI_REMOVE);
9165 		}
9166 		mutex_exit(&ptgt->tgt_mutex);
9167 
9168 		fcp_log(CE_NOTE, pptr->port_dip,
9169 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9170 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9171 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9172 		    map_entry->map_hard_addr.hard_addr);
9173 	}
9174 
9175 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9176 	    link_cnt, tgt_cnt, cause));
9177 }
9178 
9179 /*
9180  *     Function: fcp_alloc_lun
9181  *
9182  *  Description: Creates a new lun structure and adds it to the list
9183  *		 of luns of the target.
9184  *
9185  *     Argument: ptgt		Target the lun will belong to.
9186  *
9187  * Return Value: NULL		Failed
9188  *		 Not NULL	Succeeded
9189  *
9190  *	Context: Kernel context
9191  */
9192 static struct fcp_lun *
9193 fcp_alloc_lun(struct fcp_tgt *ptgt)
9194 {
9195 	struct fcp_lun *plun;
9196 
9197 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9198 	if (plun != NULL) {
9199 		/*
9200 		 * Initialize the mutex before putting in the target list
9201 		 * especially before releasing the target mutex.
9202 		 */
9203 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9204 		plun->lun_tgt = ptgt;
9205 
9206 		mutex_enter(&ptgt->tgt_mutex);
9207 		plun->lun_next = ptgt->tgt_lun;
9208 		ptgt->tgt_lun = plun;
9209 		plun->lun_old_guid = NULL;
9210 		plun->lun_old_guid_size = 0;
9211 		mutex_exit(&ptgt->tgt_mutex);
9212 	}
9213 
9214 	return (plun);
9215 }
9216 
9217 /*
9218  *     Function: fcp_dealloc_lun
9219  *
9220  *  Description: Frees the LUN structure passed by the caller.
9221  *
9222  *     Argument: plun		LUN structure to free.
9223  *
9224  * Return Value: None
9225  *
9226  *	Context: Kernel context.
9227  */
9228 static void
9229 fcp_dealloc_lun(struct fcp_lun *plun)
9230 {
9231 	mutex_enter(&plun->lun_mutex);
9232 	if (plun->lun_cip) {
9233 		fcp_remove_child(plun);
9234 	}
9235 	mutex_exit(&plun->lun_mutex);
9236 
9237 	mutex_destroy(&plun->lun_mutex);
9238 	if (plun->lun_guid) {
9239 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9240 	}
9241 	if (plun->lun_old_guid) {
9242 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9243 	}
9244 	kmem_free(plun, sizeof (*plun));
9245 }
9246 
9247 /*
9248  *     Function: fcp_alloc_tgt
9249  *
9250  *  Description: Creates a new target structure and adds it to the port
9251  *		 hash list.
9252  *
9253  *     Argument: pptr		fcp port structure
9254  *		 *map_entry	entry describing the target to create
9255  *		 link_cnt	Link state change counter
9256  *
9257  * Return Value: NULL		Failed
9258  *		 Not NULL	Succeeded
9259  *
9260  *	Context: Kernel context.
9261  */
9262 static struct fcp_tgt *
9263 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9264 {
9265 	int			hash;
9266 	uchar_t			*wwn;
9267 	struct fcp_tgt	*ptgt;
9268 
9269 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9270 	if (ptgt != NULL) {
9271 		mutex_enter(&pptr->port_mutex);
9272 		if (link_cnt != pptr->port_link_cnt) {
9273 			/*
9274 			 * oh oh -- another link reset
9275 			 * in progress -- give up
9276 			 */
9277 			mutex_exit(&pptr->port_mutex);
9278 			kmem_free(ptgt, sizeof (*ptgt));
9279 			ptgt = NULL;
9280 		} else {
9281 			/*
9282 			 * initialize the mutex before putting in the port
9283 			 * wwn list, especially before releasing the port
9284 			 * mutex.
9285 			 */
9286 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9287 
9288 			/* add new target entry to the port's hash list */
9289 			wwn = (uchar_t *)&map_entry->map_pwwn;
9290 			hash = FCP_HASH(wwn);
9291 
9292 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9293 			pptr->port_tgt_hash_table[hash] = ptgt;
9294 
9295 			/* save cross-ptr */
9296 			ptgt->tgt_port = pptr;
9297 
9298 			ptgt->tgt_change_cnt = 1;
9299 
9300 			/* initialize the target manual_config_only flag */
9301 			if (fcp_enable_auto_configuration) {
9302 				ptgt->tgt_manual_config_only = 0;
9303 			} else {
9304 				ptgt->tgt_manual_config_only = 1;
9305 			}
9306 
9307 			mutex_exit(&pptr->port_mutex);
9308 		}
9309 	}
9310 
9311 	return (ptgt);
9312 }
9313 
9314 /*
9315  *     Function: fcp_dealloc_tgt
9316  *
9317  *  Description: Frees the target structure passed by the caller.
9318  *
9319  *     Argument: ptgt		Target structure to free.
9320  *
9321  * Return Value: None
9322  *
9323  *	Context: Kernel context.
9324  */
9325 static void
9326 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9327 {
9328 	mutex_destroy(&ptgt->tgt_mutex);
9329 	kmem_free(ptgt, sizeof (*ptgt));
9330 }
9331 
9332 
/*
 * Handle STATUS_QFULL and STATUS_BUSY by performing a delayed retry.
 *
 *	Device discovery commands will not be retried forever as
 *	this would have repercussions on other devices that need to
 *	be submitted to the hotplug thread.  After a quick glance
 *	at the SCSI-3 spec, it was found that the spec doesn't
 *	mandate a forever retry; rather, it recommends a delayed retry.
 *
 *	Since the Photon IB is single threaded, STATUS_BUSY is common
 *	in a 4+ initiator environment.  Make sure the total time
 *	spent on retries (including the command timeout) does not
 *	exceed 60 seconds.
 */
9347 static void
9348 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9349 {
9350 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9351 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9352 
9353 	mutex_enter(&pptr->port_mutex);
9354 	mutex_enter(&ptgt->tgt_mutex);
9355 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9356 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9357 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1: state change occurred"
9359 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9360 		mutex_exit(&ptgt->tgt_mutex);
9361 		mutex_exit(&pptr->port_mutex);
9362 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9363 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9364 		fcp_icmd_free(pptr, icmd);
9365 		return;
9366 	}
9367 	mutex_exit(&ptgt->tgt_mutex);
9368 
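	/*
	 * Schedule the delayed retry: each successive retry pushes the
	 * restart time one more watchdog tick into the future, giving a
	 * simple linear back-off.
	 */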
9369 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9370 
9371 	if (pptr->port_ipkt_list != NULL) {
9372 		/* add pkt to front of doubly-linked list */
9373 		pptr->port_ipkt_list->ipkt_prev = icmd;
9374 		icmd->ipkt_next = pptr->port_ipkt_list;
9375 		pptr->port_ipkt_list = icmd;
9376 		icmd->ipkt_prev = NULL;
9377 	} else {
9378 		/* this is the first/only pkt on the list */
9379 		pptr->port_ipkt_list = icmd;
9380 		icmd->ipkt_next = NULL;
9381 		icmd->ipkt_prev = NULL;
9382 	}
9383 	mutex_exit(&pptr->port_mutex);
9384 }
9385 
9386 /*
9387  *     Function: fcp_transport
9388  *
 *  Description: This function submits the Fibre Channel packet to the
 *		 transport layer by calling fc_ulp_transport().  If
 *		 fc_ulp_transport() fails the submission, the treatment
 *		 depends on the value of the variable internal.
9393  *
9394  *     Argument: port_handle	fp/fctl port handle.
9395  *		 *fpkt		Packet to submit to the transport layer.
9396  *		 internal	Not zero when it's an internal packet.
9397  *
9398  * Return Value: FC_TRAN_BUSY
9399  *		 FC_STATEC_BUSY
9400  *		 FC_OFFLINE
9401  *		 FC_LOGINREQ
9402  *		 FC_DEVICE_BUSY
9403  *		 FC_SUCCESS
9404  */
9405 static int
9406 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9407 {
9408 	int	rval;
9409 
9410 	rval = fc_ulp_transport(port_handle, fpkt);
9411 	if (rval == FC_SUCCESS) {
9412 		return (rval);
9413 	}
9414 
	/*
	 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command.  If the underlying modules see a state change, or if
	 * the port is OFFLINE, that state change hasn't reached FCP yet,
	 * so re-queue the command for deferred submission.
	 */
9422 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9423 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9424 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission.  A life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * forever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea.
		 * A race condition can happen when a port sees a barrage of
		 * link transitions from offline to online.  If FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes, FCP starts
		 * its internal discovery, and then the link goes offline.
		 * It is possible that the statec_callback has not reached
		 * FCP and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline.  At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback.  This is facilitated by incrementing
		 * port_link_cnt.
		 *
		 * For external commands, the (FC) pkt_timeout is decremented
		 * by the queue delay added by our driver.  Care is taken to
		 * ensure that it doesn't become zero (zero means no timeout).
		 * If the time expires right inside the driver queue itself,
		 * the watch thread will return it to the original caller
		 * indicating that the command has timed out.
		 */
9452 		if (internal) {
9453 			char			*op;
9454 			struct fcp_ipkt	*icmd;
9455 
9456 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9457 			switch (icmd->ipkt_opcode) {
9458 			case SCMD_REPORT_LUN:
9459 				op = "REPORT LUN";
9460 				break;
9461 
9462 			case SCMD_INQUIRY:
9463 				op = "INQUIRY";
9464 				break;
9465 
9466 			case SCMD_INQUIRY_PAGE83:
9467 				op = "INQUIRY-83";
9468 				break;
9469 
9470 			default:
9471 				op = "Internal SCSI COMMAND";
9472 				break;
9473 			}
9474 
9475 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9476 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9477 				rval = FC_SUCCESS;
9478 			}
9479 		} else {
9480 			struct fcp_pkt *cmd;
9481 			struct fcp_port *pptr;
9482 
9483 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9484 			cmd->cmd_state = FCP_PKT_IDLE;
9485 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9486 
9487 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9488 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9489 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9490 				    "fcp_transport: xport busy for pkt %p",
9491 				    cmd->cmd_pkt);
9492 				rval = FC_TRAN_BUSY;
9493 			} else {
9494 				fcp_queue_pkt(pptr, cmd);
9495 				rval = FC_SUCCESS;
9496 			}
9497 		}
9498 	}
9499 
9500 	return (rval);
9501 }
9502 
9503 /*VARARGS3*/
9504 static void
9505 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9506 {
9507 	char		buf[256];
9508 	va_list		ap;
9509 
9510 	if (dip == NULL) {
9511 		dip = fcp_global_dip;
9512 	}
9513 
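	/*
	 * Format the message into a fixed-size local buffer and hand it to
	 * scsi_log().  vsprintf() does no bounds checking, so callers are
	 * expected to keep their messages well under 256 bytes.
	 */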
9514 	va_start(ap, fmt);
9515 	(void) vsprintf(buf, fmt, ap);
9516 	va_end(ap);
9517 
9518 	scsi_log(dip, "fcp", level, buf);
9519 }
9520 
/*
 * This function retries the NS registration of the FC-4 type.
 * It assumes that port_mutex is held (see the ASSERT below).
 * The function does nothing if the topology is not fabric or public loop,
 * so the topology has to be set before this function can be called.
 */
9527 static void
9528 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9529 {
9530 	int	rval;
9531 
9532 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9533 
9534 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9535 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9536 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9537 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9538 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9539 		}
9540 		return;
9541 	}
9542 	mutex_exit(&pptr->port_mutex);
9543 	rval = fcp_do_ns_registry(pptr, s_id);
9544 	mutex_enter(&pptr->port_mutex);
9545 
9546 	if (rval == 0) {
9547 		/* Registry successful. Reset flag */
9548 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9549 	}
9550 }
9551 
/*
 * This function registers the ULP with the switch by calling the transport
 * interface.
 */
9555 static int
9556 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9557 {
9558 	fc_ns_cmd_t		ns_cmd;
9559 	ns_rfc_type_t		rfc;
9560 	uint32_t		types[8];
9561 
9562 	/*
9563 	 * Prepare the Name server structure to
9564 	 * register with the transport in case of
9565 	 * Fabric configuration.
9566 	 */
9567 	bzero(&rfc, sizeof (rfc));
9568 	bzero(types, sizeof (types));
9569 
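	/*
	 * Set the bit corresponding to the SCSI-FCP FC-4 type in the FC-4
	 * types bitmap carried by the RFT_ID request.  The macros pick the
	 * 32-bit word and the bit within that word which represent the type.
	 */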
9570 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9571 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9572 
9573 	rfc.rfc_port_id.port_id = s_id;
9574 	bcopy(types, rfc.rfc_types, sizeof (types));
9575 
9576 	ns_cmd.ns_flags = 0;
9577 	ns_cmd.ns_cmd = NS_RFT_ID;
9578 	ns_cmd.ns_req_len = sizeof (rfc);
9579 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9580 	ns_cmd.ns_resp_len = 0;
9581 	ns_cmd.ns_resp_payload = NULL;
9582 
9583 	/*
9584 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9585 	 */
9586 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9587 		fcp_log(CE_WARN, pptr->port_dip,
9588 		    "!ns_registry: failed name server registration");
9589 		return (1);
9590 	}
9591 
9592 	return (0);
9593 }
9594 
9595 /*
9596  *     Function: fcp_handle_port_attach
9597  *
9598  *  Description: This function is called from fcp_port_attach() to attach a
9599  *		 new port. This routine does the following:
9600  *
9601  *		1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the
 *		   name server.
9604  *		3) Kicks off the enumeration of the targets/luns visible
9605  *		   through this new port.  That is done by calling
9606  *		   fcp_statec_callback() if the port is online.
9607  *
9608  *     Argument: ulph		fp/fctl port handle.
9609  *		 *pinfo		Port information.
9610  *		 s_id		Port ID.
9611  *		 instance	Device instance number for the local port
9612  *				(returned by ddi_get_instance()).
9613  *
9614  * Return Value: DDI_SUCCESS
9615  *		 DDI_FAILURE
9616  *
9617  *	Context: User and Kernel context.
9618  */
9619 /*ARGSUSED*/
9620 int
9621 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9622     uint32_t s_id, int instance)
9623 {
9624 	int			res = DDI_FAILURE;
9625 	scsi_hba_tran_t		*tran;
9626 	int			mutex_initted = FALSE;
9627 	int			hba_attached = FALSE;
9628 	int			soft_state_linked = FALSE;
9629 	int			event_bind = FALSE;
9630 	struct fcp_port		*pptr;
9631 	fc_portmap_t		*tmp_list = NULL;
9632 	uint32_t		max_cnt, alloc_cnt;
9633 	uchar_t			*boot_wwn = NULL;
9634 	uint_t			nbytes;
9635 	int			manual_cfg;
9636 
	/*
	 * This port instance is attaching for the first time (or again
	 * after having been detached).
	 */
9641 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9642 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9643 
9644 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed: "
9646 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9647 		    instance);
9648 		return (res);
9649 	}
9650 
9651 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9652 		/* this shouldn't happen */
9653 		ddi_soft_state_free(fcp_softstate, instance);
9654 		cmn_err(CE_WARN, "fcp: bad soft state");
9655 		return (res);
9656 	}
9657 
9658 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9659 
9660 	/*
9661 	 * Make a copy of ulp_port_info as fctl allocates
9662 	 * a temp struct.
9663 	 */
9664 	(void) fcp_cp_pinfo(pptr, pinfo);
9665 
	/*
	 * Check for the manual_configuration_only property.
	 * Enable manual configuration if the property is
	 * set to 1, otherwise disable manual configuration.
	 */
9671 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9672 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9673 	    MANUAL_CFG_ONLY,
9674 	    -1)) != -1) {
9675 		if (manual_cfg == 1) {
9676 			char	*pathname;
9677 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9678 			(void) ddi_pathname(pptr->port_dip, pathname);
9679 			cmn_err(CE_NOTE,
9680 			    "%s (%s%d) %s is enabled via %s.conf.",
9681 			    pathname,
9682 			    ddi_driver_name(pptr->port_dip),
9683 			    ddi_get_instance(pptr->port_dip),
9684 			    MANUAL_CFG_ONLY,
9685 			    ddi_driver_name(pptr->port_dip));
9686 			fcp_enable_auto_configuration = 0;
9687 			kmem_free(pathname, MAXPATHLEN);
9688 		}
9689 	}
9690 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9691 	pptr->port_link_cnt = 1;
9692 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9693 	pptr->port_id = s_id;
9694 	pptr->port_instance = instance;
9695 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state))
9696 	pptr->port_state = FCP_STATE_INIT;
9697 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state))
9698 
9699 	pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen *
9700 	    sizeof (ddi_dma_cookie_t));
9701 
9702 	/*
9703 	 * The two mutexes of fcp_port are initialized.	 The variable
9704 	 * mutex_initted is incremented to remember that fact.	That variable
9705 	 * is checked when the routine fails and the mutexes have to be
9706 	 * destroyed.
9707 	 */
9708 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9709 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9710 	mutex_initted++;
9711 
	/*
	 * The SCSI tran structure is allocated and initialized now.
	 */
9715 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9716 		fcp_log(CE_WARN, pptr->port_dip,
9717 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9718 		goto fail;
9719 	}
9720 
9721 	/* link in the transport structure then fill it in */
9722 	pptr->port_tran = tran;
9723 	tran->tran_hba_private		= pptr;
9724 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9725 	tran->tran_tgt_probe		= NULL;
9726 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9727 	tran->tran_start		= fcp_scsi_start;
9728 	tran->tran_reset		= fcp_scsi_reset;
9729 	tran->tran_abort		= fcp_scsi_abort;
9730 	tran->tran_getcap		= fcp_scsi_getcap;
9731 	tran->tran_setcap		= fcp_scsi_setcap;
9732 	tran->tran_init_pkt		= NULL;
9733 	tran->tran_destroy_pkt		= NULL;
9734 	tran->tran_dmafree		= NULL;
9735 	tran->tran_sync_pkt		= NULL;
9736 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9737 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9738 	tran->tran_get_name		= fcp_scsi_get_name;
9739 	tran->tran_clear_aca		= NULL;
9740 	tran->tran_clear_task_set	= NULL;
9741 	tran->tran_terminate_task	= NULL;
9742 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9743 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9744 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9745 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9746 	tran->tran_quiesce		= NULL;
9747 	tran->tran_unquiesce		= NULL;
9748 	tran->tran_bus_reset		= NULL;
9749 	tran->tran_bus_config		= fcp_scsi_bus_config;
9750 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9751 	tran->tran_bus_power		= NULL;
9752 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9753 
9754 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9755 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9756 	tran->tran_setup_pkt		= fcp_pkt_setup;
9757 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9758 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9759 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9760 
9761 	/*
9762 	 * Allocate an ndi event handle
9763 	 */
9764 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9765 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9766 
9767 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9768 	    sizeof (fcp_ndi_event_defs));
9769 
9770 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9771 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9772 
9773 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9774 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9775 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9776 
9777 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9778 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9779 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9780 		goto fail;
9781 	}
9782 	event_bind++;	/* Checked in fail case */
9783 
9784 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9785 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9786 	    != DDI_SUCCESS) {
9787 		fcp_log(CE_WARN, pptr->port_dip,
9788 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9789 		goto fail;
9790 	}
9791 	hba_attached++;	/* Checked in fail case */
9792 
9793 	pptr->port_mpxio = 0;
9794 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9795 	    MDI_SUCCESS) {
9796 		pptr->port_mpxio++;
9797 	}
9798 
	/*
	 * The following code puts the new port structure in the global
	 * list of ports and, if it is the first port to attach, it starts
	 * the fcp_watchdog_tick.
	 *
	 * Why put this new port in the global list before we are done
	 * attaching it?  Because of the code that follows.  At this point
	 * the resources to handle the port are allocated.  This function is
	 * now going to do the following:
	 *
	 *   1) It is going to try to register with the name server,
	 *	advertising the new FCP capability of the port.
	 *   2) It is going to play the role of the fp/fctl layer by building
	 *	a list of worldwide names reachable through this port and
	 *	call itself on fcp_statec_callback().  That requires the port
	 *	to be part of the global list.
	 */
9817 	mutex_enter(&fcp_global_mutex);
9818 	if (fcp_port_head == NULL) {
9819 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9820 	}
9821 	pptr->port_next = fcp_port_head;
9822 	fcp_port_head = pptr;
9823 	soft_state_linked++;
9824 
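	/*
	 * The first port to attach starts the global watchdog timer.  The
	 * counter is decremented again on detach (or on attach failure) and
	 * the timer is cancelled when it drops back to zero.
	 */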
9825 	if (fcp_watchdog_init++ == 0) {
9826 		fcp_watchdog_tick = fcp_watchdog_timeout *
9827 		    drv_usectohz(1000000);
9828 		fcp_watchdog_id = timeout(fcp_watch, NULL,
9829 		    fcp_watchdog_tick);
9830 	}
9831 	mutex_exit(&fcp_global_mutex);
9832 
	/*
	 * Here an attempt is made to register the new FCP capability with
	 * the name server.  That is done using an RFT_ID request to the name
	 * server.  It is done synchronously: fcp_do_ns_registry() doesn't
	 * return until the name server has responded.
	 * On failure, just ignore it for now; the registration will be
	 * retried during state change callbacks.  We set a flag to record
	 * the failure.
	 */
9841 	if (fcp_do_ns_registry(pptr, s_id)) {
9842 		mutex_enter(&pptr->port_mutex);
9843 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9844 		mutex_exit(&pptr->port_mutex);
9845 	} else {
9846 		mutex_enter(&pptr->port_mutex);
9847 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9848 		mutex_exit(&pptr->port_mutex);
9849 	}
9850 
	/*
	 * Look up the boot WWN property.
	 */
9854 	if (modrootloaded != 1) {
9855 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9856 		    ddi_get_parent(pinfo->port_dip),
9857 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9858 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9859 		    (nbytes == FC_WWN_SIZE)) {
9860 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9861 		}
9862 		if (boot_wwn) {
9863 			ddi_prop_free(boot_wwn);
9864 		}
9865 	}
9866 
9867 	/*
9868 	 * Handle various topologies and link states.
9869 	 */
9870 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
9871 	case FC_STATE_OFFLINE:
9872 
		/*
		 * We're attaching a port where the link is offline.
		 *
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback.
		 *
		 * In the meantime, do not do anything.
		 */
9881 		res = DDI_SUCCESS;
9882 		pptr->port_state |= FCP_STATE_OFFLINE;
9883 		break;
9884 
9885 	case FC_STATE_ONLINE: {
9886 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
9887 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
9888 			res = DDI_SUCCESS;
9889 			break;
9890 		}
9891 		/*
9892 		 * discover devices and create nodes (a private
9893 		 * loop or point-to-point)
9894 		 */
9895 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
9896 
9897 		/*
9898 		 * At this point we are going to build a list of all the ports
9899 		 * that	can be reached through this local port.	 It looks like
9900 		 * we cannot handle more than FCP_MAX_DEVICES per local port
9901 		 * (128).
9902 		 */
9903 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
9904 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
9905 		    KM_NOSLEEP)) == NULL) {
9906 			fcp_log(CE_WARN, pptr->port_dip,
9907 			    "!fcp%d: failed to allocate portmap",
9908 			    instance);
9909 			goto fail;
9910 		}
9911 
		/*
		 * fc_ulp_getportmap() is going to provide us with the list of
		 * remote ports in the buffer we just allocated.  The way the
		 * list is going to be retrieved depends on the topology.
		 * However, if we are connected to a Fabric, a name server
		 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that in that case the request is
		 * synchronous.  This means we are stuck here until the name
		 * server replies.  A lot of things can change during that
		 * time, including, possibly, being called on
		 * fcp_statec_callback() for different reasons.  I'm not sure
		 * the code can handle that.
		 */
9925 		max_cnt = FCP_MAX_DEVICES;
9926 		alloc_cnt = FCP_MAX_DEVICES;
9927 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
9928 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
9929 		    FC_SUCCESS) {
9930 			caddr_t msg;
9931 
9932 			(void) fc_ulp_error(res, &msg);
9933 
			/*
			 * This just means the transport is busy,
			 * perhaps building a portmap, so, for now,
			 * succeed this port attach.  When the
			 * transport has a new map, it'll send us a
			 * state change then.
			 */
9941 			fcp_log(CE_WARN, pptr->port_dip,
9942 			    "!failed to get port map : %s", msg);
9943 
9944 			res = DDI_SUCCESS;
9945 			break;	/* go return result */
9946 		}
9947 		if (max_cnt > alloc_cnt) {
9948 			alloc_cnt = max_cnt;
9949 		}
9950 
		/*
		 * We are now going to call fcp_statec_callback() ourselves.
		 * By issuing this call we are trying to kick off the
		 * enumeration process.
		 */
9956 		/*
9957 		 * let the state change callback do the SCSI device
9958 		 * discovery and create the devinfos
9959 		 */
9960 		fcp_statec_callback(ulph, pptr->port_fp_handle,
9961 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
9962 		    max_cnt, pptr->port_id);
9963 
9964 		res = DDI_SUCCESS;
9965 		break;
9966 	}
9967 
9968 	default:
9969 		/* unknown port state */
9970 		fcp_log(CE_WARN, pptr->port_dip,
9971 		    "!fcp%d: invalid port state at attach=0x%x",
9972 		    instance, pptr->port_phys_state);
9973 
9974 		mutex_enter(&pptr->port_mutex);
9975 		pptr->port_phys_state = FCP_STATE_OFFLINE;
9976 		mutex_exit(&pptr->port_mutex);
9977 
9978 		res = DDI_SUCCESS;
9979 		break;
9980 	}
9981 
9982 	/* free temp list if used */
9983 	if (tmp_list != NULL) {
9984 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
9985 	}
9986 
9987 	/* note the attach time */
9988 	pptr->port_attach_time = lbolt64;
9989 
9990 	/* all done */
9991 	return (res);
9992 
9993 	/* a failure we have to clean up after */
9994 fail:
9995 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
9996 
9997 	if (soft_state_linked) {
9998 		/* remove this fcp_port from the linked list */
9999 		(void) fcp_soft_state_unlink(pptr);
10000 	}
10001 
10002 	/* unbind and free event set */
10003 	if (pptr->port_ndi_event_hdl) {
10004 		if (event_bind) {
10005 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10006 			    &pptr->port_ndi_events, NDI_SLEEP);
10007 		}
10008 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10009 	}
10010 
10011 	if (pptr->port_ndi_event_defs) {
10012 		(void) kmem_free(pptr->port_ndi_event_defs,
10013 		    sizeof (fcp_ndi_event_defs));
10014 	}
10015 
10016 	/*
10017 	 * Clean up mpxio stuff
10018 	 */
10019 	if (pptr->port_mpxio) {
10020 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10021 		pptr->port_mpxio--;
10022 	}
10023 
10024 	/* undo SCSI HBA setup */
10025 	if (hba_attached) {
10026 		(void) scsi_hba_detach(pptr->port_dip);
10027 	}
10028 	if (pptr->port_tran != NULL) {
10029 		scsi_hba_tran_free(pptr->port_tran);
10030 	}
10031 
10032 	mutex_enter(&fcp_global_mutex);
10033 
	/*
	 * We check soft_state_linked because it is incremented right before
	 * we increment fcp_watchdog_init.  Therefore, we know that if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */
10040 
10041 	if (soft_state_linked) {
10042 		if (--fcp_watchdog_init == 0) {
10043 			timeout_id_t	tid = fcp_watchdog_id;
10044 
10045 			mutex_exit(&fcp_global_mutex);
10046 			(void) untimeout(tid);
10047 		} else {
10048 			mutex_exit(&fcp_global_mutex);
10049 		}
10050 	} else {
10051 		mutex_exit(&fcp_global_mutex);
10052 	}
10053 
10054 	if (mutex_initted) {
10055 		mutex_destroy(&pptr->port_mutex);
10056 		mutex_destroy(&pptr->port_pkt_mutex);
10057 	}
10058 
10059 	if (tmp_list != NULL) {
10060 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10061 	}
10062 
10063 	/* this makes pptr invalid */
10064 	ddi_soft_state_free(fcp_softstate, instance);
10065 
10066 	return (DDI_FAILURE);
10067 }
10068 
10069 
10070 static int
10071 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10072 {
10073 	int count = 0;
10074 
10075 	mutex_enter(&pptr->port_mutex);
10076 
10077 	/*
10078 	 * if the port is powered down or suspended, nothing else
10079 	 * to do; just return.
10080 	 */
10081 	if (flag != FCP_STATE_DETACHING) {
10082 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10083 		    FCP_STATE_SUSPENDED)) {
10084 			pptr->port_state |= flag;
10085 			mutex_exit(&pptr->port_mutex);
10086 			return (FC_SUCCESS);
10087 		}
10088 	}
10089 
10090 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10091 		mutex_exit(&pptr->port_mutex);
10092 		return (FC_FAILURE);
10093 	}
10094 
10095 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10096 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10097 	    "fcp_handle_port_detach: port is detaching");
10098 
10099 	pptr->port_state |= flag;
10100 
	/*
	 * Wait for any ongoing reconfig/ipkt to complete; that
	 * ensures that freeing the targets/luns is safe.
	 * No more references to this port should happen from statec/ioctl
	 * after that, as it was removed from the global port list.
	 */
10107 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10108 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10109 		/*
10110 		 * Let's give sufficient time for reconfig/ipkt
10111 		 * to complete.
10112 		 */
10113 		if (count++ >= FCP_ICMD_DEADLINE) {
10114 			break;
10115 		}
10116 		mutex_exit(&pptr->port_mutex);
10117 		delay(drv_usectohz(1000000));
10118 		mutex_enter(&pptr->port_mutex);
10119 	}
10120 
10121 	/*
10122 	 * if the driver is still busy then fail to
10123 	 * suspend/power down.
10124 	 */
10125 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10126 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10127 		pptr->port_state &= ~flag;
10128 		mutex_exit(&pptr->port_mutex);
10129 		return (FC_FAILURE);
10130 	}
10131 
10132 	if (flag == FCP_STATE_DETACHING) {
10133 		pptr = fcp_soft_state_unlink(pptr);
10134 		ASSERT(pptr != NULL);
10135 	}
10136 
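	/*
	 * Bump port_link_cnt so that in-flight internal commands notice the
	 * link count mismatch and give up; then mark the port offline and
	 * flag its LUNs as busy/marked below.
	 */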
10137 	pptr->port_link_cnt++;
10138 	pptr->port_state |= FCP_STATE_OFFLINE;
10139 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10140 
10141 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10142 	    FCP_CAUSE_LINK_DOWN);
10143 	mutex_exit(&pptr->port_mutex);
10144 
10145 	/* kill watch dog timer if we're the last */
10146 	mutex_enter(&fcp_global_mutex);
10147 	if (--fcp_watchdog_init == 0) {
10148 		timeout_id_t	tid = fcp_watchdog_id;
10149 		mutex_exit(&fcp_global_mutex);
10150 		(void) untimeout(tid);
10151 	} else {
10152 		mutex_exit(&fcp_global_mutex);
10153 	}
10154 
10155 	/* clean up the port structures */
10156 	if (flag == FCP_STATE_DETACHING) {
10157 		fcp_cleanup_port(pptr, instance);
10158 	}
10159 
10160 	return (FC_SUCCESS);
10161 }
10162 
10163 
10164 static void
10165 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10166 {
10167 	ASSERT(pptr != NULL);
10168 
10169 	/* unbind and free event set */
10170 	if (pptr->port_ndi_event_hdl) {
10171 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10172 		    &pptr->port_ndi_events, NDI_SLEEP);
10173 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10174 	}
10175 
10176 	if (pptr->port_ndi_event_defs) {
10177 		(void) kmem_free(pptr->port_ndi_event_defs,
10178 		    sizeof (fcp_ndi_event_defs));
10179 	}
10180 
10181 	/* free the lun/target structures and devinfos */
10182 	fcp_free_targets(pptr);
10183 
10184 	/*
10185 	 * Clean up mpxio stuff
10186 	 */
10187 	if (pptr->port_mpxio) {
10188 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10189 		pptr->port_mpxio--;
10190 	}
10191 
10192 	/* clean up SCSA stuff */
10193 	(void) scsi_hba_detach(pptr->port_dip);
10194 	if (pptr->port_tran != NULL) {
10195 		scsi_hba_tran_free(pptr->port_tran);
10196 	}
10197 
10198 #ifdef	KSTATS_CODE
10199 	/* clean up kstats */
10200 	if (pptr->fcp_ksp != NULL) {
10201 		kstat_delete(pptr->fcp_ksp);
10202 	}
10203 #endif
10204 
10205 	/* clean up soft state mutexes/condition variables */
10206 	mutex_destroy(&pptr->port_mutex);
10207 	mutex_destroy(&pptr->port_pkt_mutex);
10208 
10209 	/* all done with soft state */
10210 	ddi_soft_state_free(fcp_softstate, instance);
10211 }
10212 
10213 /*
10214  *     Function: fcp_kmem_cache_constructor
10215  *
 *  Description: This function allocates and initializes the resources required
 *		 to build a scsi_pkt structure for the target driver.  The
 *		 result of the allocation and initialization will be cached in
 *		 the memory cache.  As DMA resources may be allocated here,
 *		 that means DMA resources will be tied up in the cache manager.
 *		 This is a tradeoff that has been made for performance reasons.
10222  *
10223  *     Argument: *buf		Memory to preinitialize.
10224  *		 *arg		FCP port structure (fcp_port).
10225  *		 kmflags	Value passed to kmem_cache_alloc() and
10226  *				propagated to the constructor.
10227  *
10228  * Return Value: 0	Allocation/Initialization was successful.
10229  *		 -1	Allocation or Initialization failed.
10230  *
10231  *
10232  * If the returned value is 0, the buffer is initialized like this:
10233  *
10234  *		    +================================+
10235  *	     +----> |	      struct scsi_pkt	     |
10236  *	     |	    |				     |
10237  *	     | +--- | pkt_ha_private		     |
10238  *	     | |    |				     |
10239  *	     | |    +================================+
10240  *	     | |
10241  *	     | |    +================================+
10242  *	     | +--> |	    struct fcp_pkt	     | <---------+
10243  *	     |	    |				     |		 |
10244  *	     +----- | cmd_pkt			     |		 |
10245  *		    |			  cmd_fp_pkt | ---+	 |
10246  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10247  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10248  *	  |    |    |--------------------------------|	  |	 |
10249  *	  |    |    |	      struct fc_packet	     | <--+	 |
10250  *	  |    |    |				     |		 |
10251  *	  |    |    |		     pkt_ulp_private | ----------+
10252  *	  |    |    |		     pkt_fca_private | -----+
10253  *	  |    |    |		     pkt_data_cookie | ---+ |
10254  *	  |    |    | pkt_cmdlen		     |	  | |
10255  *	  |    |(a) | pkt_rsplen		     |	  | |
10256  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10257  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10258  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10259  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10260  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10261  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10262  *		    +================================+	  | |  |   |   |    |
10263  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10264  *		    |				     |	    |  |   |   |    |
10265  *		    +================================+	    |  |   |   |    |
10266  *		    |	      fca_private	     | <----+  |   |   |    |
10267  *		    |				     |	       |   |   |    |
10268  *		    +================================+	       |   |   |    |
10269  *							       |   |   |    |
10270  *							       |   |   |    |
10271  *		    +================================+	 (d)   |   |   |    |
10272  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10273  *		    |				     |		   |   |    |
10274  *		    +================================+		   |   |    |
10275  *								   |   |    |
10276  *		    +================================+	 (d)	   |   |    |
10277  *		    |		fcp_resp	     | <-----------+   |    |
10278  *		    |	(DMA resources associated)   |		       |    |
10279  *		    +================================+		       |    |
10280  *								       |    |
10281  *								       |    |
10282  *								       |    |
10283  *		    +================================+	 (c)	       |    |
10284  *		    |	     fcp_cmd cookies	     | <---------------+    |
10285  *		    |				     |			    |
10286  *		    +================================+			    |
10287  *									    |
10288  *		    +================================+	 (c)		    |
10289  *		    |		 fcp_cmd	     | <--------------------+
10290  *		    |	(DMA resources associated)   |
10291  *		    +================================+
10292  *
10293  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10294  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10295  * (c) Only if DMA is used for the FCP_CMD buffer.
10296  * (d) Only if DMA is used for the FCP_RESP buffer
10297  */
10298 static int
10299 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10300     int kmflags)
10301 {
10302 	struct fcp_pkt	*cmd;
10303 	struct fcp_port	*pptr;
10304 	fc_packet_t	*fpkt;
10305 
10306 	pptr = (struct fcp_port *)tran->tran_hba_private;
10307 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10308 	bzero(cmd, tran->tran_hba_len);
10309 
10310 	cmd->cmd_pkt = pkt;
10311 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10312 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10313 	cmd->cmd_fp_pkt = fpkt;
10314 
10315 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10316 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
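	/*
	 * Per the layout diagram above, the per-packet allocation places the
	 * data DMA cookie array immediately after the fcp_pkt structure and
	 * the FCA private area right after that cookie array.
	 */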
10317 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10318 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10319 
10320 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10321 	    sizeof (struct fcp_pkt));
10322 
10323 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10324 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10325 
10326 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp.  The transfer of information will be done by
		 * bcopy.
		 * The naming of the flag (which is actually a value) is
		 * unfortunate.  FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
10335 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10336 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10337 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10338 	} else {
		/*
		 * The underlying HBA will DMA the fcp_cmd buffer and fcp_resp
		 * buffer.  A buffer is allocated for each one using the
		 * ddi_dma_* interfaces.
		 */
10344 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10345 			return (-1);
10346 		}
10347 	}
10348 
10349 	return (0);
10350 }
10351 
10352 /*
10353  *     Function: fcp_kmem_cache_destructor
10354  *
 *  Description: Called by the destructor of the cache managed by SCSA.
 *		 All the resources pre-allocated and the data pre-initialized
 *		 in fcp_kmem_cache_constructor() are freed and uninitialized
 *		 here.
10359  *
10360  *     Argument: *buf		Memory to uninitialize.
10361  *		 *arg		FCP port structure (fcp_port).
10362  *
10363  * Return Value: None
10364  *
10365  *	Context: kernel
10366  */
10367 static void
10368 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10369 {
10370 	struct fcp_pkt	*cmd;
10371 	struct fcp_port	*pptr;
10372 
10373 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10374 	cmd = pkt->pkt_ha_private;
10375 
10376 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10377 		/*
10378 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10379 		 * buffer and DMA resources allocated to do so are released.
10380 		 */
10381 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10382 	}
10383 }
10384 
10385 /*
10386  *     Function: fcp_alloc_cmd_resp
10387  *
 *  Description: This function allocates an FCP_CMD and an FCP_RESP buffer
 *		 that will be DMAed by the HBA.  The buffers are allocated
 *		 applying the DMA requirements of the HBA.  The buffers
 *		 allocated will also be bound.  DMA resources are allocated
 *		 in the process.  They will be released by
 *		 fcp_free_cmd_resp().
10393  *
10394  *     Argument: *pptr	FCP port.
10395  *		 *fpkt	fc packet for which the cmd and resp packet should be
10396  *			allocated.
10397  *		 flags	Allocation flags.
10398  *
10399  * Return Value: FC_FAILURE
10400  *		 FC_SUCCESS
10401  *
10402  *	Context: User or Kernel context only if flags == KM_SLEEP.
10403  *		 Interrupt context if the KM_SLEEP is not specified.
10404  */
10405 static int
10406 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10407 {
10408 	int			rval;
10409 	int			cmd_len;
10410 	int			resp_len;
10411 	ulong_t			real_len;
10412 	int			(*cb) (caddr_t);
10413 	ddi_dma_cookie_t	pkt_cookie;
10414 	ddi_dma_cookie_t	*cp;
10415 	uint32_t		cnt;
10416 
10417 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10418 
10419 	cmd_len = fpkt->pkt_cmdlen;
10420 	resp_len = fpkt->pkt_rsplen;
10421 
10422 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10423 
10424 	/* Allocation of a DMA handle used in subsequent calls. */
10425 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10426 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10427 		return (FC_FAILURE);
10428 	}
10429 
10430 	/* A buffer is allocated that satisfies the DMA requirements. */
10431 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10432 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10433 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10434 
10435 	if (rval != DDI_SUCCESS) {
10436 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10437 		return (FC_FAILURE);
10438 	}
10439 
10440 	if (real_len < cmd_len) {
10441 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10442 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10443 		return (FC_FAILURE);
10444 	}
10445 
10446 	/* The buffer allocated is DMA bound. */
10447 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10448 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10449 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10450 
10451 	if (rval != DDI_DMA_MAPPED) {
10452 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10453 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10454 		return (FC_FAILURE);
10455 	}
10456 
10457 	if (fpkt->pkt_cmd_cookie_cnt >
10458 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10459 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10460 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10461 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10462 		return (FC_FAILURE);
10463 	}
10464 
10465 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10466 
10467 	/*
10468 	 * The buffer where the scatter/gather list is going to be built is
10469 	 * allocated.
10470 	 */
10471 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10472 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10473 	    KM_NOSLEEP);
10474 
10475 	if (cp == NULL) {
10476 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10477 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10478 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10479 		return (FC_FAILURE);
10480 	}
10481 
10482 	/*
10483 	 * The scatter/gather list for the buffer we just allocated is built
10484 	 * here.
10485 	 */
10486 	*cp = pkt_cookie;
10487 	cp++;
10488 
10489 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10490 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10491 		    &pkt_cookie);
10492 		*cp = pkt_cookie;
10493 	}
10494 
10495 	ASSERT(fpkt->pkt_resp_dma == NULL);
10496 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10497 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10498 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10499 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10500 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10501 		return (FC_FAILURE);
10502 	}
10503 
10504 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10505 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10506 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10507 	    &fpkt->pkt_resp_acc);
10508 
10509 	if (rval != DDI_SUCCESS) {
10510 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10511 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10512 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10513 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10514 		kmem_free(fpkt->pkt_cmd_cookie,
10515 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10516 		return (FC_FAILURE);
10517 	}
10518 
10519 	if (real_len < resp_len) {
10520 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10521 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10522 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10523 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10524 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10525 		kmem_free(fpkt->pkt_cmd_cookie,
10526 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10527 		return (FC_FAILURE);
10528 	}
10529 
10530 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10531 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10532 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10533 
10534 	if (rval != DDI_DMA_MAPPED) {
10535 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10536 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10537 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10538 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10539 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10540 		kmem_free(fpkt->pkt_cmd_cookie,
10541 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10542 		return (FC_FAILURE);
10543 	}
10544 
10545 	if (fpkt->pkt_resp_cookie_cnt >
10546 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10547 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10548 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10549 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10550 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10551 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10552 		kmem_free(fpkt->pkt_cmd_cookie,
10553 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10554 		return (FC_FAILURE);
10555 	}
10556 
10557 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10558 
10559 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10560 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10561 	    KM_NOSLEEP);
10562 
10563 	if (cp == NULL) {
10564 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10565 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10566 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10567 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10568 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10569 		kmem_free(fpkt->pkt_cmd_cookie,
10570 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10571 		return (FC_FAILURE);
10572 	}
10573 
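	/*
	 * Build the scatter/gather cookie list for the response buffer, the
	 * same way it was done for the command buffer above.
	 */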
10574 	*cp = pkt_cookie;
10575 	cp++;
10576 
10577 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10578 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10579 		    &pkt_cookie);
10580 		*cp = pkt_cookie;
10581 	}
10582 
10583 	return (FC_SUCCESS);
10584 }
10585 
10586 /*
10587  *     Function: fcp_free_cmd_resp
10588  *
10589  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10590  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10591  *		 associated with them.	That includes the DMA resources and the
10592  *		 buffer allocated for the cookies of each one of them.
10593  *
10594  *     Argument: *pptr		FCP port context.
10595  *		 *fpkt		fc packet containing the cmd and resp packet
10596  *				to be released.
10597  *
10598  * Return Value: None
10599  *
10600  *	Context: Interrupt, User and Kernel context.
10601  */
10602 /* ARGSUSED */
10603 static void
10604 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10605 {
10606 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10607 
10608 	if (fpkt->pkt_resp_dma) {
10609 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10610 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10611 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10612 	}
10613 
10614 	if (fpkt->pkt_resp_cookie) {
10615 		kmem_free(fpkt->pkt_resp_cookie,
10616 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10617 		fpkt->pkt_resp_cookie = NULL;
10618 	}
10619 
10620 	if (fpkt->pkt_cmd_dma) {
10621 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10622 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10623 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10624 	}
10625 
10626 	if (fpkt->pkt_cmd_cookie) {
10627 		kmem_free(fpkt->pkt_cmd_cookie,
10628 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10629 		fpkt->pkt_cmd_cookie = NULL;
10630 	}
10631 }
10632 
10633 
10634 /*
10635  * called by the transport to do our own target initialization
10636  *
10637  * can acquire and release the global mutex
10638  */
10639 /* ARGSUSED */
10640 static int
10641 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10642     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10643 {
10644 	uchar_t			*bytes;
10645 	uint_t			nbytes;
10646 	uint16_t		lun_num;
10647 	struct fcp_tgt	*ptgt;
10648 	struct fcp_lun	*plun;
10649 	struct fcp_port	*pptr = (struct fcp_port *)
10650 	    hba_tran->tran_hba_private;
10651 
10652 	ASSERT(pptr != NULL);
10653 
10654 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10655 	    FCP_BUF_LEVEL_8, 0,
10656 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10657 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10658 
10659 	/* get our port WWN property */
10660 	bytes = NULL;
10661 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10662 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10663 	    (nbytes != FC_WWN_SIZE)) {
10664 		/* no port WWN property */
10665 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10666 		    FCP_BUF_LEVEL_8, 0,
10667 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10668 		    " for %s (instance %d): bytes=%p nbytes=%x",
10669 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10670 		    nbytes);
10671 
10672 		if (bytes != NULL) {
10673 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10674 		}
10675 
10676 		return (DDI_NOT_WELL_FORMED);
10677 	}
10678 	ASSERT(bytes != NULL);
10679 
10680 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10681 	    LUN_PROP, 0xFFFF);
10682 	if (lun_num == 0xFFFF) {
10683 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10684 		    FCP_BUF_LEVEL_8, 0,
10685 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10686 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10687 		    ddi_get_instance(tgt_dip));
10688 
10689 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10690 		return (DDI_NOT_WELL_FORMED);
10691 	}
10692 
10693 	mutex_enter(&pptr->port_mutex);
10694 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10695 		mutex_exit(&pptr->port_mutex);
10696 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10697 		    FCP_BUF_LEVEL_8, 0,
10698 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10699 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10700 		    ddi_get_instance(tgt_dip));
10701 
10702 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10703 		return (DDI_FAILURE);
10704 	}
10705 
10706 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10707 	    FC_WWN_SIZE) == 0);
10708 	ASSERT(plun->lun_num == lun_num);
10709 
10710 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10711 
10712 	ptgt = plun->lun_tgt;
10713 
10714 	mutex_enter(&ptgt->tgt_mutex);
10715 	plun->lun_tgt_count++;
10716 	scsi_device_hba_private_set(sd, plun);
10717 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10718 	plun->lun_sd = sd;
10719 	mutex_exit(&ptgt->tgt_mutex);
10720 	mutex_exit(&pptr->port_mutex);
10721 
10722 	return (DDI_SUCCESS);
10723 }
10724 
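/*
 * called by the transport (via fcp_scsi_tgt_init) to initialize an
 * mpxio-managed target child; the pathinfo-based counterpart of
 * fcp_phys_tgt_init()
 *
 * acquires and releases the port and target mutexes
 */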
10725 /*ARGSUSED*/
10726 static int
10727 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10728     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10729 {
10730 	uchar_t			*bytes;
10731 	uint_t			nbytes;
10732 	uint16_t		lun_num;
10733 	struct fcp_tgt	*ptgt;
10734 	struct fcp_lun	*plun;
10735 	struct fcp_port	*pptr = (struct fcp_port *)
10736 	    hba_tran->tran_hba_private;
10737 	child_info_t		*cip;
10738 
10739 	ASSERT(pptr != NULL);
10740 
10741 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10742 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10743 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10744 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10745 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10746 
10747 	cip = (child_info_t *)sd->sd_pathinfo;
10748 	if (cip == NULL) {
10749 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10750 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10751 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10752 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10753 		    ddi_get_instance(tgt_dip));
10754 
10755 		return (DDI_NOT_WELL_FORMED);
10756 	}
10757 
10758 	/* get our port WWN property */
10759 	bytes = NULL;
10760 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10761 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10762 	    (nbytes != FC_WWN_SIZE)) {
10763 		if (bytes)
10764 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10765 		return (DDI_NOT_WELL_FORMED);
10766 	}
10767 
10768 	ASSERT(bytes != NULL);
10769 
10770 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10771 	    LUN_PROP, 0xFFFF);
10772 	if (lun_num == 0xFFFF) {
10773 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10774 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10775 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10776 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10777 		    ddi_get_instance(tgt_dip));
10778 
10779 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10780 		return (DDI_NOT_WELL_FORMED);
10781 	}
10782 
10783 	mutex_enter(&pptr->port_mutex);
10784 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10785 		mutex_exit(&pptr->port_mutex);
10786 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10787 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10788 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10789 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10790 		    ddi_get_instance(tgt_dip));
10791 
10792 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10793 		return (DDI_FAILURE);
10794 	}
10795 
10796 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10797 	    FC_WWN_SIZE) == 0);
10798 	ASSERT(plun->lun_num == lun_num);
10799 
10800 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10801 
10802 	ptgt = plun->lun_tgt;
10803 
10804 	mutex_enter(&ptgt->tgt_mutex);
10805 	plun->lun_tgt_count++;
10806 	scsi_device_hba_private_set(sd, plun);
10807 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10808 	plun->lun_sd = sd;
10809 	mutex_exit(&ptgt->tgt_mutex);
10810 	mutex_exit(&pptr->port_mutex);
10811 
10812 	return (DDI_SUCCESS);
10813 }
10814 
10815 
10816 /*
10817  * called by the transport to do our own target initialization
10818  *
10819  * can acquire and release the global mutex
10820  */
10821 /* ARGSUSED */
10822 static int
10823 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10824     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10825 {
10826 	struct fcp_port	*pptr = (struct fcp_port *)
10827 	    hba_tran->tran_hba_private;
10828 	int			rval;
10829 
10830 	ASSERT(pptr != NULL);
10831 
10832 	/*
10833 	 * Child node is getting initialized.  Look at the mpxio component
10834 	 * type on the child device to see if this device is mpxio managed
10835 	 * or not.
10836 	 */
10837 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10838 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10839 	} else {
10840 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10841 	}
10842 
10843 	return (rval);
10844 }
10845 
10846 
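/*
 * called by the transport when a target/LUN instance is torn down;
 * undoes the work of fcp_phys_tgt_init()/fcp_virt_tgt_init() by dropping
 * the LUN's tgt_init reference and clearing its scsi_device pointer
 */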
10847 /* ARGSUSED */
10848 static void
10849 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10850     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10851 {
10852 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
10853 	struct fcp_tgt	*ptgt;
10854 
10855 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10856 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10857 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10858 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10859 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10860 
10861 	if (plun == NULL) {
10862 		return;
10863 	}
10864 	ptgt = plun->lun_tgt;
10865 
10866 	ASSERT(ptgt != NULL);
10867 
10868 	mutex_enter(&ptgt->tgt_mutex);
10869 	ASSERT(plun->lun_tgt_count > 0);
10870 
10871 	if (--plun->lun_tgt_count == 0) {
10872 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
10873 	}
10874 	plun->lun_sd = NULL;
10875 	mutex_exit(&ptgt->tgt_mutex);
10876 }
10877 
10878 /*
10879  *     Function: fcp_scsi_start
10880  *
10881  *  Description: This function is called by the target driver to request a
10882  *		 command to be sent.
10883  *
10884  *     Argument: *ap		SCSI address of the device.
10885  *		 *pkt		SCSI packet containing the cmd to send.
10886  *
10887  * Return Value: TRAN_ACCEPT
10888  *		 TRAN_BUSY
10889  *		 TRAN_BADPKT
10890  *		 TRAN_FATAL_ERROR
10891  */
10892 static int
10893 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
10894 {
10895 	struct fcp_port	*pptr = ADDR2FCP(ap);
10896 	struct fcp_lun	*plun = ADDR2LUN(ap);
10897 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
10898 	struct fcp_tgt	*ptgt = plun->lun_tgt;
10899 	int			rval;
10900 
10901 	/* ensure command isn't already issued */
10902 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
10903 
10904 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10905 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
10906 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
10907 
10908 	/*
10909 	 * It is strange that we enter the fcp_port mutex and the target
10910 	 * mutex to check the lun state (which has a mutex of its own).
10911 	 */
10912 	mutex_enter(&pptr->port_mutex);
10913 	mutex_enter(&ptgt->tgt_mutex);
10914 
10915 	/*
10916 	 * If the device is offline and is not in the process of coming
10917 	 * online, fail the request.
10918 	 */
10919 
10920 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
10921 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
10922 		mutex_exit(&ptgt->tgt_mutex);
10923 		mutex_exit(&pptr->port_mutex);
10924 
10925 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
10926 			pkt->pkt_reason = CMD_DEV_GONE;
10927 		}
10928 
10929 		return (TRAN_FATAL_ERROR);
10930 	}
10931 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
10932 
10933 	/*
10934 	 * If we are suspended, kernel is trying to dump, so don't
10935 	 * block, fail or defer requests - send them down right away.
10936 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
10937 	 * assume we have been suspended.  There is hardware such as
10938 	 * the v880 that doesn't do PM.	 Thus, the check for
10939 	 * ddi_in_panic.
10940 	 *
10941 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
10942 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
10943 	 * either the device will have gone away or changed and we can fail
10944 	 * the request, or we can proceed if the device didn't change.
10945 	 *
10946 	 * If the pd in the target or the packet is NULL it's probably
10947 	 * because the device has gone away, we allow the request to be
10948 	 * put on the internal queue here in case the device comes back within
10949 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals with the cases where pkt_pd is NULL.
	 * pkt_pd could be NULL because the device was disappearing during or
	 * since packet initialization.
10953 	 */
10954 
10955 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
10956 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
10957 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
10958 	    (ptgt->tgt_pd_handle == NULL) ||
10959 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
10960 		/*
10961 		 * If ((LUN is busy AND
10962 		 *	LUN not suspended AND
10963 		 *	The system is not in panic state) OR
10964 		 *	(The port is coming up))
10965 		 *
10966 		 * We check to see if the any of the flags FLAG_NOINTR or
10967 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
10968 		 * returned will be TRAN_BUSY.	If not, the request is queued.
10969 		 */
10970 		mutex_exit(&ptgt->tgt_mutex);
10971 		mutex_exit(&pptr->port_mutex);
10972 
10973 		/* see if using interrupts is allowed (so queueing'll work) */
10974 		if (pkt->pkt_flags & FLAG_NOINTR) {
10975 			pkt->pkt_resid = 0;
10976 			return (TRAN_BUSY);
10977 		}
10978 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
10979 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10980 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
10981 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
10982 			return (TRAN_BUSY);
10983 		}
10984 #ifdef	DEBUG
10985 		mutex_enter(&pptr->port_pkt_mutex);
10986 		pptr->port_npkts++;
10987 		mutex_exit(&pptr->port_pkt_mutex);
10988 #endif /* DEBUG */
10989 
		/* go queue up the pkt for later */
10991 		fcp_queue_pkt(pptr, cmd);
10992 		return (TRAN_ACCEPT);
10993 	}
10994 	cmd->cmd_state = FCP_PKT_ISSUED;
10995 
10996 	mutex_exit(&ptgt->tgt_mutex);
10997 	mutex_exit(&pptr->port_mutex);
10998 
10999 	/*
11000 	 * Now that we released the mutexes, what was protected by them can
11001 	 * change.
11002 	 */
11003 
11004 	/*
11005 	 * If there is a reconfiguration in progress, wait for it to complete.
11006 	 */
11007 	fcp_reconfig_wait(pptr);
11008 
11009 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11010 	    pkt->pkt_time : 0;
11011 
11012 	/* prepare the packet */
11013 
11014 	fcp_prepare_pkt(pptr, cmd, plun);
11015 
11016 	if (cmd->cmd_pkt->pkt_time) {
11017 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11018 	} else {
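		/*
		 * No timeout was specified by the target driver; fall back
		 * to a long default so the exchange is still bounded.
		 */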
11019 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11020 	}
11021 
11022 	/*
11023 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11024 	 * have to do polled I/O
11025 	 */
11026 	if (pkt->pkt_flags & FLAG_NOINTR) {
11027 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11028 		return (fcp_dopoll(pptr, cmd));
11029 	}
11030 
11031 #ifdef	DEBUG
11032 	mutex_enter(&pptr->port_pkt_mutex);
11033 	pptr->port_npkts++;
11034 	mutex_exit(&pptr->port_pkt_mutex);
11035 #endif /* DEBUG */
11036 
11037 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11038 	if (rval == FC_SUCCESS) {
11039 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11040 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11041 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11042 		return (TRAN_ACCEPT);
11043 	}
11044 
11045 	cmd->cmd_state = FCP_PKT_IDLE;
11046 
11047 #ifdef	DEBUG
11048 	mutex_enter(&pptr->port_pkt_mutex);
11049 	pptr->port_npkts--;
11050 	mutex_exit(&pptr->port_pkt_mutex);
11051 #endif /* DEBUG */
11052 
11053 	/*
11054 	 * For lack of clearer definitions, choose
11055 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11056 	 */
11057 
11058 	if (rval == FC_TRAN_BUSY) {
11059 		pkt->pkt_resid = 0;
11060 		rval = TRAN_BUSY;
11061 	} else {
11062 		mutex_enter(&ptgt->tgt_mutex);
11063 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11064 			child_info_t	*cip;
11065 
11066 			mutex_enter(&plun->lun_mutex);
11067 			cip = plun->lun_cip;
11068 			mutex_exit(&plun->lun_mutex);
11069 
11070 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11071 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11072 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11073 			    plun->lun_tgt->tgt_d_id, rval, cip);
11074 
11075 			rval = TRAN_FATAL_ERROR;
11076 		} else {
11077 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11078 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11079 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11080 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11081 				    pkt);
11082 				rval = TRAN_BUSY;
11083 			} else {
11084 				rval = TRAN_ACCEPT;
11085 				fcp_queue_pkt(pptr, cmd);
11086 			}
11087 		}
11088 		mutex_exit(&ptgt->tgt_mutex);
11089 	}
11090 
11091 	return (rval);
11092 }
11093 
11094 /*
11095  * called by the transport to abort a packet
11096  */
11097 /*ARGSUSED*/
11098 static int
11099 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11100 {
11101 	int tgt_cnt;
11102 	struct fcp_port		*pptr = ADDR2FCP(ap);
11103 	struct fcp_lun	*plun = ADDR2LUN(ap);
11104 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11105 
11106 	if (pkt == NULL) {
11107 		if (ptgt) {
11108 			mutex_enter(&ptgt->tgt_mutex);
11109 			tgt_cnt = ptgt->tgt_change_cnt;
11110 			mutex_exit(&ptgt->tgt_mutex);
11111 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11112 			return (TRUE);
11113 		}
11114 	}
11115 	return (FALSE);
11116 }
11117 
11118 
11119 /*
11120  * Perform reset
11121  */
11122 int
11123 fcp_scsi_reset(struct scsi_address *ap, int level)
11124 {
11125 	int			rval = 0;
11126 	struct fcp_port		*pptr = ADDR2FCP(ap);
11127 	struct fcp_lun	*plun = ADDR2LUN(ap);
11128 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11129 
11130 	if (level == RESET_ALL) {
11131 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11132 			rval = 1;
11133 		}
11134 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11135 		/*
11136 		 * If we are in the middle of discovery, return
11137 		 * SUCCESS as this target will be rediscovered
11138 		 * anyway
11139 		 */
11140 		mutex_enter(&ptgt->tgt_mutex);
11141 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11142 			mutex_exit(&ptgt->tgt_mutex);
11143 			return (1);
11144 		}
11145 		mutex_exit(&ptgt->tgt_mutex);
11146 
11147 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11148 			rval = 1;
11149 		}
11150 	}
11151 	return (rval);
11152 }
11153 
11154 
11155 /*
11156  * called by the framework to get a SCSI capability
11157  */
11158 static int
11159 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11160 {
11161 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11162 }
11163 
11164 
11165 /*
11166  * called by the framework to set a SCSI capability
11167  */
11168 static int
11169 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11170 {
11171 	return (fcp_commoncap(ap, cap, value, whom, 1));
11172 }
11173 
11174 /*
11175  *     Function: fcp_pkt_setup
11176  *
11177  *  Description: This function sets up the scsi_pkt structure passed by the
11178  *		 caller. This function assumes fcp_pkt_constructor has been
11179  *		 called previously for the packet passed by the caller.	 If
11180  *		 successful this call will have the following results:
11181  *
11182  *		   - The resources needed that will be constant through out
11183  *		     the whole transaction are allocated.
11184  *		   - The fields that will be constant through out the whole
11185  *		     transaction are initialized.
11186  *		   - The scsi packet will be linked to the LUN structure
11187  *		     addressed by the transaction.
11188  *
11189  *     Argument:
11190  *		 *pkt		Pointer to a scsi_pkt structure.
11191  *		 callback
11192  *		 arg
11193  *
11194  * Return Value: 0	Success
11195  *		 !0	Failure
11196  *
11197  *	Context: Kernel context or interrupt context
11198  */
11199 /* ARGSUSED */
11200 static int
11201 fcp_pkt_setup(struct scsi_pkt *pkt,
11202     int (*callback)(caddr_t arg),
11203     caddr_t arg)
11204 {
11205 	struct fcp_pkt	*cmd;
11206 	struct fcp_port	*pptr;
11207 	struct fcp_lun	*plun;
11208 	struct fcp_tgt	*ptgt;
11209 	int		kf;
11210 	fc_packet_t	*fpkt;
11211 	fc_frame_hdr_t	*hp;
11212 
11213 	pptr = ADDR2FCP(&pkt->pkt_address);
11214 	plun = ADDR2LUN(&pkt->pkt_address);
11215 	ptgt = plun->lun_tgt;
11216 
11217 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11218 	fpkt = cmd->cmd_fp_pkt;
11219 
11220 	/*
11221 	 * this request is for dma allocation only
11222 	 */
11223 	/*
11224 	 * First step of fcp_scsi_init_pkt: pkt allocation
11225 	 * We determine if the caller is willing to wait for the
11226 	 * resources.
11227 	 */
11228 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11229 
11230 	/*
11231 	 * Selective zeroing of the pkt.
11232 	 */
11233 	cmd->cmd_back = NULL;
11234 	cmd->cmd_next = NULL;
11235 
11236 	/*
11237 	 * Zero out fcp command
11238 	 */
11239 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11240 
11241 	cmd->cmd_state = FCP_PKT_IDLE;
11242 
11243 	fpkt = cmd->cmd_fp_pkt;
11244 	fpkt->pkt_data_acc = NULL;
11245 
11246 	mutex_enter(&ptgt->tgt_mutex);
11247 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11248 
11249 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11250 	    != FC_SUCCESS) {
11251 		mutex_exit(&ptgt->tgt_mutex);
11252 		return (-1);
11253 	}
11254 
11255 	mutex_exit(&ptgt->tgt_mutex);
11256 
	/* Fill in the Fibre Channel frame header */
11258 	hp = &fpkt->pkt_cmd_fhdr;
11259 	hp->r_ctl = R_CTL_COMMAND;
11260 	hp->rsvd = 0;
11261 	hp->type = FC_TYPE_SCSI_FCP;
11262 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11263 	hp->seq_id = 0;
11264 	hp->df_ctl  = 0;
11265 	hp->seq_cnt = 0;
11266 	hp->ox_id = 0xffff;
11267 	hp->rx_id = 0xffff;
11268 	hp->ro = 0;
11269 
11270 	/*
11271 	 * A doubly linked list (cmd_forw, cmd_back) is built
11272 	 * out of every allocated packet on a per-lun basis
11273 	 *
11274 	 * The packets are maintained in the list so as to satisfy
11275 	 * scsi_abort() requests. At present (which is unlikely to
11276 	 * change in the future) nobody performs a real scsi_abort
11277 	 * in the SCSI target drivers (as they don't keep the packets
11278 	 * after doing scsi_transport - so they don't know how to
11279 	 * abort a packet other than sending a NULL to abort all
11280 	 * outstanding packets)
11281 	 */
11282 	mutex_enter(&plun->lun_mutex);
11283 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11284 		plun->lun_pkt_head->cmd_back = cmd;
11285 	} else {
11286 		plun->lun_pkt_tail = cmd;
11287 	}
11288 	plun->lun_pkt_head = cmd;
11289 	mutex_exit(&plun->lun_mutex);
11290 	return (0);
11291 }
11292 
11293 /*
11294  *     Function: fcp_pkt_teardown
11295  *
11296  *  Description: This function releases a scsi_pkt structure and all the
11297  *		 resources attached to it.
11298  *
11299  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11300  *
11301  * Return Value: None
11302  *
11303  *	Context: User, Kernel or Interrupt context.
11304  */
11305 static void
11306 fcp_pkt_teardown(struct scsi_pkt *pkt)
11307 {
11308 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11309 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11310 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11311 
11312 	/*
11313 	 * Remove the packet from the per-lun list
11314 	 */
11315 	mutex_enter(&plun->lun_mutex);
11316 	if (cmd->cmd_back) {
11317 		ASSERT(cmd != plun->lun_pkt_head);
11318 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11319 	} else {
11320 		ASSERT(cmd == plun->lun_pkt_head);
11321 		plun->lun_pkt_head = cmd->cmd_forw;
11322 	}
11323 
11324 	if (cmd->cmd_forw) {
11325 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11326 	} else {
11327 		ASSERT(cmd == plun->lun_pkt_tail);
11328 		plun->lun_pkt_tail = cmd->cmd_back;
11329 	}
11330 
11331 	mutex_exit(&plun->lun_mutex);
11332 
11333 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11334 }
11335 
11336 /*
11337  * Routine for reset notification setup, to register or cancel.
11338  * This function is called by SCSA
11339  */
11340 /*ARGSUSED*/
11341 static int
11342 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11343     void (*callback)(caddr_t), caddr_t arg)
11344 {
11345 	struct fcp_port *pptr = ADDR2FCP(ap);
11346 
11347 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11348 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11349 }
11350 
11351 
11352 static int
11353 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11354     ddi_eventcookie_t *event_cookiep)
11355 {
11356 	struct fcp_port *pptr = fcp_dip2port(dip);
11357 
11358 	if (pptr == NULL) {
11359 		return (DDI_FAILURE);
11360 	}
11361 
11362 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11363 	    event_cookiep, NDI_EVENT_NOPASS));
11364 }
11365 
11366 
11367 static int
11368 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11369     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11370     ddi_callback_id_t *cb_id)
11371 {
11372 	struct fcp_port *pptr = fcp_dip2port(dip);
11373 
11374 	if (pptr == NULL) {
11375 		return (DDI_FAILURE);
11376 	}
11377 
11378 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11379 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11380 }
11381 
11382 
11383 static int
11384 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11385 {
11386 
11387 	struct fcp_port *pptr = fcp_dip2port(dip);
11388 
11389 	if (pptr == NULL) {
11390 		return (DDI_FAILURE);
11391 	}
11392 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11393 }
11394 
11395 
11396 /*
11397  * called by the transport to post an event
11398  */
11399 static int
11400 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11401     ddi_eventcookie_t eventid, void *impldata)
11402 {
11403 	struct fcp_port *pptr = fcp_dip2port(dip);
11404 
11405 	if (pptr == NULL) {
11406 		return (DDI_FAILURE);
11407 	}
11408 
11409 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11410 	    eventid, impldata));
11411 }
11412 
11413 
11414 /*
11415  * A target in in many cases in Fibre Channel has a one to one relation
11416  * with a port identifier (which is also known as D_ID and also as AL_PA
11417  * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11418  * will most likely result in resetting all LUNs (which means a reset will
11419  * occur on all the SCSI devices connected at the other end of the bridge)
11420  * That is the latest favorite topic for discussion, for, one can debate as
11421  * hot as one likes and come up with arguably a best solution to one's
11422  * satisfaction
11423  *
11424  * To stay on track and not digress much, here are the problems stated
11425  * briefly:
11426  *
11427  *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11428  *	target drivers use RESET_TARGET even if their instance is on a
11429  *	LUN. Doesn't that sound a bit broken ?
11430  *
11431  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11432  *	control fields of an FCP_CMND structure. It should have been
11433  *	fixed right there, giving flexibility to the initiators to
11434  *	minimize havoc that could be caused by resetting a target.
11435  */
11436 static int
11437 fcp_reset_target(struct scsi_address *ap, int level)
11438 {
11439 	int			rval = FC_FAILURE;
11440 	char			lun_id[25];
11441 	struct fcp_port		*pptr = ADDR2FCP(ap);
11442 	struct fcp_lun	*plun = ADDR2LUN(ap);
11443 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11444 	struct scsi_pkt		*pkt;
11445 	struct fcp_pkt	*cmd;
11446 	struct fcp_rsp		*rsp;
11447 	uint32_t		tgt_cnt;
11448 	struct fcp_rsp_info	*rsp_info;
11449 	struct fcp_reset_elem	*p;
11450 	int			bval;
11451 
11452 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11453 	    KM_NOSLEEP)) == NULL) {
11454 		return (rval);
11455 	}
11456 
11457 	mutex_enter(&ptgt->tgt_mutex);
11458 	if (level == RESET_TARGET) {
11459 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11460 			mutex_exit(&ptgt->tgt_mutex);
11461 			kmem_free(p, sizeof (struct fcp_reset_elem));
11462 			return (rval);
11463 		}
11464 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11465 		(void) strcpy(lun_id, " ");
11466 	} else {
11467 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11468 			mutex_exit(&ptgt->tgt_mutex);
11469 			kmem_free(p, sizeof (struct fcp_reset_elem));
11470 			return (rval);
11471 		}
11472 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11473 
11474 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11475 	}
11476 	tgt_cnt = ptgt->tgt_change_cnt;
11477 
11478 	mutex_exit(&ptgt->tgt_mutex);
11479 
11480 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11481 	    0, 0, NULL, 0)) == NULL) {
11482 		kmem_free(p, sizeof (struct fcp_reset_elem));
11483 		mutex_enter(&ptgt->tgt_mutex);
11484 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11485 		mutex_exit(&ptgt->tgt_mutex);
11486 		return (rval);
11487 	}
11488 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11489 
11490 	/* fill in cmd part of packet */
11491 	cmd = PKT2CMD(pkt);
11492 	if (level == RESET_TARGET) {
11493 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11494 	} else {
11495 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11496 	}
11497 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11498 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11499 
11500 	/* prepare a packet for transport */
11501 	fcp_prepare_pkt(pptr, cmd, plun);
11502 
11503 	if (cmd->cmd_pkt->pkt_time) {
11504 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11505 	} else {
11506 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11507 	}
11508 
11509 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11510 	bval = fcp_dopoll(pptr, cmd);
11511 	fc_ulp_idle_port(pptr->port_fp_handle);
11512 
	/* examine the outcome of the polled command */
11514 	if (bval == TRAN_ACCEPT) {
11515 		int error = 3;
11516 
11517 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11518 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11519 		    sizeof (struct fcp_rsp));
11520 
11521 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11522 			if (fcp_validate_fcp_response(rsp, pptr) ==
11523 			    FC_SUCCESS) {
11524 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11525 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11526 					    sizeof (struct fcp_rsp), rsp_info,
11527 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11528 					    sizeof (struct fcp_rsp_info));
11529 				}
11530 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11531 					rval = FC_SUCCESS;
11532 					error = 0;
11533 				} else {
11534 					error = 1;
11535 				}
11536 			} else {
11537 				error = 2;
11538 			}
11539 		}
11540 
11541 		switch (error) {
11542 		case 0:
11543 			fcp_log(CE_WARN, pptr->port_dip,
11544 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11545 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11546 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11547 			break;
11548 
11549 		case 1:
11550 			fcp_log(CE_WARN, pptr->port_dip,
11551 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11552 			    " response code=%x",
11553 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11554 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11555 			    rsp_info->rsp_code);
11556 			break;
11557 
11558 		case 2:
11559 			fcp_log(CE_WARN, pptr->port_dip,
11560 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11561 			    " Bad FCP response values: rsvd1=%x,"
11562 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11563 			    " rsplen=%x, senselen=%x",
11564 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11565 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11566 			    rsp->reserved_0, rsp->reserved_1,
11567 			    rsp->fcp_u.fcp_status.reserved_0,
11568 			    rsp->fcp_u.fcp_status.reserved_1,
11569 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11570 			break;
11571 
11572 		default:
11573 			fcp_log(CE_WARN, pptr->port_dip,
11574 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11575 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11576 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11577 			break;
11578 		}
11579 	}
11580 	scsi_destroy_pkt(pkt);
11581 
11582 	if (rval == FC_FAILURE) {
11583 		mutex_enter(&ptgt->tgt_mutex);
11584 		if (level == RESET_TARGET) {
11585 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11586 		} else {
11587 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11588 		}
11589 		mutex_exit(&ptgt->tgt_mutex);
11590 		kmem_free(p, sizeof (struct fcp_reset_elem));
11591 		return (rval);
11592 	}
11593 
11594 	mutex_enter(&pptr->port_mutex);
11595 	if (level == RESET_TARGET) {
11596 		p->tgt = ptgt;
11597 		p->lun = NULL;
11598 	} else {
11599 		p->tgt = NULL;
11600 		p->lun = plun;
11601 	}
11602 	p->tgt = ptgt;
11603 	p->tgt_cnt = tgt_cnt;
11604 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11605 	p->next = pptr->port_reset_list;
11606 	pptr->port_reset_list = p;
11607 
11608 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11609 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11610 	    "Notify ssd of the reset to reinstate the reservations");
11611 
11612 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11613 	    &pptr->port_reset_notify_listf);
11614 
11615 	mutex_exit(&pptr->port_mutex);
11616 
11617 	return (rval);
11618 }
11619 
11620 
11621 /*
11622  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11623  * SCSI capabilities
11624  */
11625 /* ARGSUSED */
11626 static int
11627 fcp_commoncap(struct scsi_address *ap, char *cap,
11628     int val, int tgtonly, int doset)
11629 {
11630 	struct fcp_port		*pptr = ADDR2FCP(ap);
11631 	struct fcp_lun	*plun = ADDR2LUN(ap);
11632 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11633 	int			cidx;
11634 	int			rval = FALSE;
11635 
11636 	if (cap == (char *)0) {
11637 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11638 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11639 		    "fcp_commoncap: invalid arg");
11640 		return (rval);
11641 	}
11642 
11643 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11644 		return (UNDEFINED);
11645 	}
11646 
11647 	/*
11648 	 * Process setcap request.
11649 	 */
11650 	if (doset) {
11651 		/*
11652 		 * At present, we can only set binary (0/1) values
11653 		 */
11654 		switch (cidx) {
11655 		case SCSI_CAP_ARQ:
11656 			if (val == 0) {
11657 				rval = FALSE;
11658 			} else {
11659 				rval = TRUE;
11660 			}
11661 			break;
11662 
11663 		case SCSI_CAP_LUN_RESET:
11664 			if (val) {
11665 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11666 			} else {
11667 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11668 			}
11669 			rval = TRUE;
11670 			break;
11671 
11672 		case SCSI_CAP_SECTOR_SIZE:
11673 			rval = TRUE;
11674 			break;
11675 		default:
11676 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11677 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11678 			    "fcp_setcap: unsupported %d", cidx);
11679 			rval = UNDEFINED;
11680 			break;
11681 		}
11682 
11683 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11684 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11685 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11686 		    "0x%x/0x%x/0x%x/%d",
11687 		    cap, val, tgtonly, doset, rval);
11688 
11689 	} else {
11690 		/*
11691 		 * Process getcap request.
11692 		 */
11693 		switch (cidx) {
11694 		case SCSI_CAP_DMA_MAX:
11695 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11696 
11697 			/*
11698 			 * Need to make an adjustment qlc is uint_t 64
11699 			 * st is int, so we will make the adjustment here
11700 			 * being as nobody wants to touch this.
11701 			 * It still leaves the max single block length
11702 			 * of 2 gig. This should last .
11703 			 */
11704 
11705 			if (rval == -1) {
11706 				rval = MAX_INT_DMA;
11707 			}
11708 
11709 			break;
11710 
11711 		case SCSI_CAP_INITIATOR_ID:
11712 			rval = pptr->port_id;
11713 			break;
11714 
11715 		case SCSI_CAP_ARQ:
11716 		case SCSI_CAP_RESET_NOTIFICATION:
11717 		case SCSI_CAP_TAGGED_QING:
11718 			rval = TRUE;
11719 			break;
11720 
11721 		case SCSI_CAP_SCSI_VERSION:
11722 			rval = 3;
11723 			break;
11724 
11725 		case SCSI_CAP_INTERCONNECT_TYPE:
11726 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11727 			    (ptgt->tgt_hard_addr == 0)) {
11728 				rval = INTERCONNECT_FABRIC;
11729 			} else {
11730 				rval = INTERCONNECT_FIBRE;
11731 			}
11732 			break;
11733 
11734 		case SCSI_CAP_LUN_RESET:
11735 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11736 			    TRUE : FALSE;
11737 			break;
11738 
11739 		default:
11740 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11741 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11742 			    "fcp_getcap: unsupported %d", cidx);
11743 			rval = UNDEFINED;
11744 			break;
11745 		}
11746 
11747 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11748 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11749 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11750 		    "0x%x/0x%x/0x%x/%d",
11751 		    cap, val, tgtonly, doset, rval);
11752 	}
11753 
11754 	return (rval);
11755 }
11756 
11757 /*
11758  * called by the transport to get the port-wwn and lun
11759  * properties of this device, and to create a "name" based on them
11760  *
11761  * these properties don't exist on sun4m
11762  *
11763  * return 1 for success else return 0
11764  */
11765 /* ARGSUSED */
11766 static int
11767 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11768 {
11769 	int			i;
11770 	int			*lun;
11771 	int			numChars;
11772 	uint_t			nlun;
11773 	uint_t			count;
11774 	uint_t			nbytes;
11775 	uchar_t			*bytes;
11776 	uint16_t		lun_num;
11777 	uint32_t		tgt_id;
11778 	char			**conf_wwn;
11779 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11780 	uchar_t			barray[FC_WWN_SIZE];
11781 	dev_info_t		*tgt_dip;
11782 	struct fcp_tgt	*ptgt;
11783 	struct fcp_port	*pptr;
11784 	struct fcp_lun	*plun;
11785 
11786 	ASSERT(sd != NULL);
11787 	ASSERT(name != NULL);
11788 
11789 	tgt_dip = sd->sd_dev;
11790 	pptr = ddi_get_soft_state(fcp_softstate,
11791 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11792 	if (pptr == NULL) {
11793 		return (0);
11794 	}
11795 
11796 	ASSERT(tgt_dip != NULL);
11797 
11798 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11799 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11800 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11801 		name[0] = '\0';
11802 		return (0);
11803 	}
11804 
11805 	if (nlun == 0) {
11806 		ddi_prop_free(lun);
11807 		return (0);
11808 	}
11809 
11810 	lun_num = lun[0];
11811 	ddi_prop_free(lun);
11812 
11813 	/*
11814 	 * Lookup for .conf WWN property
11815 	 */
11816 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11817 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11818 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11819 		ASSERT(count >= 1);
11820 
11821 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11822 		ddi_prop_free(conf_wwn);
11823 		mutex_enter(&pptr->port_mutex);
11824 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11825 			mutex_exit(&pptr->port_mutex);
11826 			return (0);
11827 		}
11828 		ptgt = plun->lun_tgt;
11829 		mutex_exit(&pptr->port_mutex);
11830 
11831 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11832 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11833 
11834 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11835 		    ptgt->tgt_hard_addr != 0) {
11836 			tgt_id = (uint32_t)fcp_alpa_to_switch[
11837 			    ptgt->tgt_hard_addr];
11838 		} else {
11839 			tgt_id = ptgt->tgt_d_id;
11840 		}
11841 
11842 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11843 		    TARGET_PROP, tgt_id);
11844 	}
11845 
	/* get our port-wwn property */
11847 	bytes = NULL;
11848 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11849 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11850 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11851 		if (bytes != NULL) {
11852 			ddi_prop_free(bytes);
11853 		}
11854 		return (0);
11855 	}
11856 
11857 	for (i = 0; i < FC_WWN_SIZE; i++) {
11858 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
11859 	}
11860 
11861 	/* Stick in the address of the form "wWWN,LUN" */
11862 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
11863 
11864 	ASSERT(numChars < len);
11865 	if (numChars >= len) {
11866 		fcp_log(CE_WARN, pptr->port_dip,
11867 		    "!fcp_scsi_get_name: "
11868 		    "name parameter length too small, it needs to be %d",
11869 		    numChars+1);
11870 	}
11871 
11872 	ddi_prop_free(bytes);
11873 
11874 	return (1);
11875 }
11876 
11877 
11878 /*
11879  * called by the transport to get the SCSI target id value, returning
11880  * it in "name"
11881  *
11882  * this isn't needed/used on sun4m
11883  *
11884  * return 1 for success else return 0
11885  */
11886 /* ARGSUSED */
11887 static int
11888 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
11889 {
11890 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
11891 	struct fcp_tgt	*ptgt;
11892 	int    numChars;
11893 
11894 	if (plun == NULL) {
11895 		return (0);
11896 	}
11897 
11898 	if ((ptgt = plun->lun_tgt) == NULL) {
11899 		return (0);
11900 	}
11901 
11902 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
11903 
11904 	ASSERT(numChars < len);
11905 	if (numChars >= len) {
11906 		fcp_log(CE_WARN, NULL,
11907 		    "!fcp_scsi_get_bus_addr: "
11908 		    "name parameter length too small, it needs to be %d",
11909 		    numChars+1);
11910 	}
11911 
11912 	return (1);
11913 }
11914 
11915 
11916 /*
11917  * called internally to reset the link where the specified port lives
11918  */
11919 static int
11920 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
11921 {
11922 	la_wwn_t		wwn;
11923 	struct fcp_lun	*plun;
11924 	struct fcp_tgt	*ptgt;
11925 
11926 	/* disable restart of lip if we're suspended */
11927 	mutex_enter(&pptr->port_mutex);
11928 
11929 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
11930 	    FCP_STATE_POWER_DOWN)) {
11931 		mutex_exit(&pptr->port_mutex);
11932 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11933 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
11934 		    "fcp_linkreset, fcp%d: link reset "
11935 		    "disabled due to DDI_SUSPEND",
11936 		    ddi_get_instance(pptr->port_dip));
11937 		return (FC_FAILURE);
11938 	}
11939 
11940 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
11941 		mutex_exit(&pptr->port_mutex);
11942 		return (FC_SUCCESS);
11943 	}
11944 
11945 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11946 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
11947 
11948 	/*
11949 	 * If ap == NULL assume local link reset.
11950 	 */
11951 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
11952 		plun = ADDR2LUN(ap);
11953 		ptgt = plun->lun_tgt;
11954 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
11955 	} else {
11956 		bzero((caddr_t)&wwn, sizeof (wwn));
11957 	}
11958 	mutex_exit(&pptr->port_mutex);
11959 
11960 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
11961 }
11962 
11963 
11964 /*
11965  * called from fcp_port_attach() to resume a port
11966  * return DDI_* success/failure status
11967  * acquires and releases the global mutex
11968  * acquires and releases the port mutex
11969  */
11970 /*ARGSUSED*/
11971 
11972 static int
11973 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
11974     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
11975 {
11976 	int			res = DDI_FAILURE; /* default result */
11977 	struct fcp_port	*pptr;		/* port state ptr */
11978 	uint32_t		alloc_cnt;
11979 	uint32_t		max_cnt;
11980 	fc_portmap_t		*tmp_list = NULL;
11981 
11982 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
11983 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
11984 	    instance);
11985 
11986 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
11987 		cmn_err(CE_WARN, "fcp: bad soft state");
11988 		return (res);
11989 	}
11990 
11991 	mutex_enter(&pptr->port_mutex);
11992 	switch (cmd) {
11993 	case FC_CMD_RESUME:
11994 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
11995 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
11996 		break;
11997 
11998 	case FC_CMD_POWER_UP:
11999 		/*
12000 		 * If the port is DDI_SUSPENded, defer rediscovery
12001 		 * until DDI_RESUME occurs
12002 		 */
12003 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12004 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12005 			mutex_exit(&pptr->port_mutex);
12006 			return (DDI_SUCCESS);
12007 		}
12008 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12009 	}
12010 	pptr->port_id = s_id;
12011 	pptr->port_state = FCP_STATE_INIT;
12012 	mutex_exit(&pptr->port_mutex);
12013 
12014 	/*
12015 	 * Make a copy of ulp_port_info as fctl allocates
12016 	 * a temp struct.
12017 	 */
12018 	(void) fcp_cp_pinfo(pptr, pinfo);
12019 
12020 	mutex_enter(&fcp_global_mutex);
12021 	if (fcp_watchdog_init++ == 0) {
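		/*
		 * This is the first active port: convert the watchdog
		 * period to clock ticks and arm the fcp_watch() timer.
		 */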
12022 		fcp_watchdog_tick = fcp_watchdog_timeout *
12023 		    drv_usectohz(1000000);
12024 		fcp_watchdog_id = timeout(fcp_watch,
12025 		    NULL, fcp_watchdog_tick);
12026 	}
12027 	mutex_exit(&fcp_global_mutex);
12028 
12029 	/*
12030 	 * Handle various topologies and link states.
12031 	 */
12032 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12033 	case FC_STATE_OFFLINE:
12034 		/*
12035 		 * Wait for ONLINE, at which time a state
12036 		 * change will cause a statec_callback
12037 		 */
12038 		res = DDI_SUCCESS;
12039 		break;
12040 
12041 	case FC_STATE_ONLINE:
12042 
12043 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12044 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12045 			res = DDI_SUCCESS;
12046 			break;
12047 		}
12048 
12049 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12050 		    !fcp_enable_auto_configuration) {
12051 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12052 			if (tmp_list == NULL) {
12053 				if (!alloc_cnt) {
12054 					res = DDI_SUCCESS;
12055 				}
12056 				break;
12057 			}
12058 			max_cnt = alloc_cnt;
12059 		} else {
12060 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12061 
12062 			alloc_cnt = FCP_MAX_DEVICES;
12063 
12064 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12065 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12066 			    KM_NOSLEEP)) == NULL) {
12067 				fcp_log(CE_WARN, pptr->port_dip,
12068 				    "!fcp%d: failed to allocate portmap",
12069 				    instance);
12070 				break;
12071 			}
12072 
12073 			max_cnt = alloc_cnt;
12074 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12075 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12076 			    FC_SUCCESS) {
12077 				caddr_t msg;
12078 
12079 				(void) fc_ulp_error(res, &msg);
12080 
12081 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12082 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12083 				    "resume failed getportmap: reason=0x%x",
12084 				    res);
12085 
12086 				fcp_log(CE_WARN, pptr->port_dip,
12087 				    "!failed to get port map : %s", msg);
12088 				break;
12089 			}
12090 			if (max_cnt > alloc_cnt) {
12091 				alloc_cnt = max_cnt;
12092 			}
12093 		}
12094 
12095 		/*
12096 		 * do the SCSI device discovery and create
12097 		 * the devinfos
12098 		 */
12099 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12100 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12101 		    max_cnt, pptr->port_id);
12102 
12103 		res = DDI_SUCCESS;
12104 		break;
12105 
12106 	default:
12107 		fcp_log(CE_WARN, pptr->port_dip,
12108 		    "!fcp%d: invalid port state at attach=0x%x",
12109 		    instance, pptr->port_phys_state);
12110 
12111 		mutex_enter(&pptr->port_mutex);
12112 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12113 		mutex_exit(&pptr->port_mutex);
12114 		res = DDI_SUCCESS;
12115 
12116 		break;
12117 	}
12118 
12119 	if (tmp_list != NULL) {
12120 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12121 	}
12122 
12123 	return (res);
12124 }
12125 
12126 
12127 static void
12128 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12129 {
12130 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12131 	pptr->port_dip = pinfo->port_dip;
12132 	pptr->port_fp_handle = pinfo->port_handle;
12133 	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12134 	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12135 	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12136 	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12137 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12138 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12139 	pptr->port_phys_state = pinfo->port_state;
12140 	pptr->port_topology = pinfo->port_flags;
12141 	pptr->port_reset_action = pinfo->port_reset_action;
12142 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12143 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12144 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12145 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12146 }
12147 
12148 /*
12149  * If the elements wait field is set to 1 then
12150  * another thread is waiting for the operation to complete. Once
12151  * it is complete, the waiting thread is signaled and the element is
12152  * freed by the waiting thread. If the elements wait field is set to 0
12153  * the element is freed.
12154  */
12155 static void
12156 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12157 {
12158 	ASSERT(elem != NULL);
12159 	mutex_enter(&elem->mutex);
12160 	elem->result = result;
12161 	if (elem->wait) {
12162 		elem->wait = 0;
12163 		cv_signal(&elem->cv);
12164 		mutex_exit(&elem->mutex);
12165 	} else {
12166 		mutex_exit(&elem->mutex);
12167 		cv_destroy(&elem->cv);
12168 		mutex_destroy(&elem->mutex);
12169 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12170 	}
12171 }
12172 
12173 /*
12174  * This function is invoked from the taskq thread to allocate
12175  * devinfo nodes and to online/offline them.
12176  */
12177 static void
12178 fcp_hp_task(void *arg)
12179 {
12180 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12181 	struct fcp_lun	*plun = elem->lun;
12182 	struct fcp_port		*pptr = elem->port;
12183 	int			result;
12184 
12185 	ASSERT(elem->what == FCP_ONLINE ||
12186 	    elem->what == FCP_OFFLINE ||
12187 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12188 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12189 
12190 	mutex_enter(&pptr->port_mutex);
12191 	mutex_enter(&plun->lun_mutex);
12192 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12193 	    plun->lun_event_count != elem->event_cnt) ||
12194 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12195 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12196 		mutex_exit(&plun->lun_mutex);
12197 		mutex_exit(&pptr->port_mutex);
12198 		fcp_process_elem(elem, NDI_FAILURE);
12199 		return;
12200 	}
12201 	mutex_exit(&plun->lun_mutex);
12202 	mutex_exit(&pptr->port_mutex);
12203 
12204 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12205 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12206 	fcp_process_elem(elem, result);
12207 }
12208 
12209 
12210 static child_info_t *
12211 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12212     int tcount)
12213 {
12214 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12215 
12216 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12217 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12218 
12219 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12220 		/*
12221 		 * Child has not been created yet. Create the child device
12222 		 * based on the per-Lun flags.
12223 		 */
12224 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12225 			plun->lun_cip =
12226 			    CIP(fcp_create_dip(plun, lcount, tcount));
12227 			plun->lun_mpxio = 0;
12228 		} else {
12229 			plun->lun_cip =
12230 			    CIP(fcp_create_pip(plun, lcount, tcount));
12231 			plun->lun_mpxio = 1;
12232 		}
12233 	} else {
12234 		plun->lun_cip = cip;
12235 	}
12236 
12237 	return (plun->lun_cip);
12238 }
12239 
12240 
12241 static int
12242 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12243 {
12244 	int		rval = FC_FAILURE;
12245 	dev_info_t	*pdip;
12246 	struct dev_info	*dip;
12247 	int		circular;
12248 
12249 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12250 
12251 	pdip = plun->lun_tgt->tgt_port->port_dip;
12252 
12253 	if (plun->lun_cip == NULL) {
12254 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12255 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12256 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12257 		    "plun: %p lun state: %x num: %d target state: %x",
12258 		    plun, plun->lun_state, plun->lun_num,
12259 		    plun->lun_tgt->tgt_port->port_state);
12260 		return (rval);
12261 	}
12262 	ndi_devi_enter(pdip, &circular);
12263 	dip = DEVI(pdip)->devi_child;
12264 	while (dip) {
12265 		if (dip == DEVI(cdip)) {
12266 			rval = FC_SUCCESS;
12267 			break;
12268 		}
12269 		dip = dip->devi_sibling;
12270 	}
12271 	ndi_devi_exit(pdip, circular);
12272 	return (rval);
12273 }
12274 
12275 static int
12276 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12277 {
12278 	int		rval = FC_FAILURE;
12279 
12280 	ASSERT(plun != NULL);
12281 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12282 
12283 	if (plun->lun_mpxio == 0) {
12284 		rval = fcp_is_dip_present(plun, DIP(cip));
12285 	} else {
12286 		rval = fcp_is_pip_present(plun, PIP(cip));
12287 	}
12288 
12289 	return (rval);
12290 }
12291 
12292 /*
12293  *     Function: fcp_create_dip
12294  *
12295  *  Description: Creates a dev_info_t structure for the LUN specified by the
12296  *		 caller.
12297  *
12298  *     Argument: plun		Lun structure
12299  *		 link_cnt	Link state count.
12300  *		 tgt_cnt	Target state change count.
12301  *
12302  * Return Value: NULL if it failed
12303  *		 dev_info_t structure address if it succeeded
12304  *
12305  *	Context: Kernel context
12306  */
12307 static dev_info_t *
12308 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12309 {
12310 	int			failure = 0;
12311 	uint32_t		tgt_id;
12312 	uint64_t		sam_lun;
12313 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12314 	struct fcp_port	*pptr = ptgt->tgt_port;
12315 	dev_info_t		*pdip = pptr->port_dip;
12316 	dev_info_t		*cdip = NULL;
12317 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12318 	char			*nname = NULL;
12319 	char			**compatible = NULL;
12320 	int			ncompatible;
12321 	char			*scsi_binding_set;
12322 	char			t_pwwn[17];
12323 
12324 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12325 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12326 
12327 	/* get the 'scsi-binding-set' property */
12328 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12329 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12330 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12331 		scsi_binding_set = NULL;
12332 	}
12333 
12334 	/* determine the node name and compatible */
12335 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12336 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12337 	if (scsi_binding_set) {
12338 		ddi_prop_free(scsi_binding_set);
12339 	}
12340 
12341 	if (nname == NULL) {
12342 #ifdef	DEBUG
12343 		cmn_err(CE_WARN, "%s%d: no driver for "
12344 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12345 		    "	 compatible: %s",
12346 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12347 		    ptgt->tgt_port_wwn.raw_wwn[0],
12348 		    ptgt->tgt_port_wwn.raw_wwn[1],
12349 		    ptgt->tgt_port_wwn.raw_wwn[2],
12350 		    ptgt->tgt_port_wwn.raw_wwn[3],
12351 		    ptgt->tgt_port_wwn.raw_wwn[4],
12352 		    ptgt->tgt_port_wwn.raw_wwn[5],
12353 		    ptgt->tgt_port_wwn.raw_wwn[6],
12354 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12355 		    *compatible);
12356 #endif	/* DEBUG */
12357 		failure++;
12358 		goto end_of_fcp_create_dip;
12359 	}
12360 
12361 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12362 
12363 	/*
12364 	 * if the old_dip does not match the cdip, that means there is
12365 	 * some property change. since we'll be using the cdip, we need
12366 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12367 	 * then the dtype for the device has been updated. Offline the
12368 	 * the old device and create a new device with the new device type
12369 	 * Refer to bug: 4764752
12370 	 */
12371 	if (old_dip && (cdip != old_dip ||
12372 	    plun->lun_state & FCP_LUN_CHANGED)) {
12373 		plun->lun_state &= ~(FCP_LUN_INIT);
12374 		mutex_exit(&plun->lun_mutex);
12375 		mutex_exit(&pptr->port_mutex);
12376 
12377 		mutex_enter(&ptgt->tgt_mutex);
12378 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12379 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12380 		mutex_exit(&ptgt->tgt_mutex);
12381 
12382 #ifdef DEBUG
12383 		if (cdip != NULL) {
12384 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12385 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12386 			    "Old dip=%p; New dip=%p don't match", old_dip,
12387 			    cdip);
12388 		} else {
12389 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12390 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12391 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12392 		}
12393 #endif
12394 
12395 		mutex_enter(&pptr->port_mutex);
12396 		mutex_enter(&plun->lun_mutex);
12397 	}
12398 
12399 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12400 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12401 		if (ndi_devi_alloc(pptr->port_dip, nname,
12402 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12403 			failure++;
12404 			goto end_of_fcp_create_dip;
12405 		}
12406 	}
12407 
12408 	/*
12409 	 * Previously all the properties for the devinfo were destroyed here
12410 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12411 	 * the devid property (and other properties established by the target
12412 	 * driver or framework) which the code does not always recreate, this
12413 	 * call was removed.
12414 	 * This opens a theoretical possibility that we may return with a
12415 	 * stale devid on the node if the scsi entity behind the fibre channel
12416 	 * lun has changed.
12417 	 */
12418 
12419 	/* decorate the node with compatible */
12420 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12421 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12422 		failure++;
12423 		goto end_of_fcp_create_dip;
12424 	}
12425 
12426 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12427 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12428 		failure++;
12429 		goto end_of_fcp_create_dip;
12430 	}
12431 
12432 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12433 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12434 		failure++;
12435 		goto end_of_fcp_create_dip;
12436 	}
12437 
12438 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12439 	t_pwwn[16] = '\0';
12440 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12441 	    != DDI_PROP_SUCCESS) {
12442 		failure++;
12443 		goto end_of_fcp_create_dip;
12444 	}
12445 
12446 	/*
12447 	 * If there is no hard address - We might have to deal with
12448 	 * that by using WWN - Having said that it is important to
12449 	 * recognize this problem early so ssd can be informed of
12450 	 * the right interconnect type.
12451 	 */
12452 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12453 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12454 	} else {
12455 		tgt_id = ptgt->tgt_d_id;
12456 	}
12457 
12458 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12459 	    tgt_id) != DDI_PROP_SUCCESS) {
12460 		failure++;
12461 		goto end_of_fcp_create_dip;
12462 	}
12463 
12464 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12465 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12466 		failure++;
12467 		goto end_of_fcp_create_dip;
12468 	}
12469 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12470 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12471 	    sam_lun) != DDI_PROP_SUCCESS) {
12472 		failure++;
12473 		goto end_of_fcp_create_dip;
12474 	}
12475 
12476 end_of_fcp_create_dip:
12477 	scsi_hba_nodename_compatible_free(nname, compatible);
12478 
12479 	if (cdip != NULL && failure) {
12480 		(void) ndi_prop_remove_all(cdip);
12481 		(void) ndi_devi_free(cdip);
12482 		cdip = NULL;
12483 	}
12484 
12485 	return (cdip);
12486 }
12487 
12488 /*
12489  *     Function: fcp_create_pip
12490  *
 *  Description: Creates an mdi_pathinfo node for the LUN specified by the
 *		 caller.
 *
 *     Argument: plun		LUN structure.
 *		 lcount		Link state count.
 *		 tcount		Target state count.
12496  *
12497  * Return Value: NULL if it failed
12498  *		 mdi_pathinfo_t structure address if it succeeded
12499  *
12500  *	Context: Kernel context
12501  */
12502 static mdi_pathinfo_t *
12503 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12504 {
12505 	int			i;
12506 	char			buf[MAXNAMELEN];
12507 	char			uaddr[MAXNAMELEN];
12508 	int			failure = 0;
12509 	uint32_t		tgt_id;
12510 	uint64_t		sam_lun;
12511 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12512 	struct fcp_port	*pptr = ptgt->tgt_port;
12513 	dev_info_t		*pdip = pptr->port_dip;
12514 	mdi_pathinfo_t		*pip = NULL;
12515 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12516 	char			*nname = NULL;
12517 	char			**compatible = NULL;
12518 	int			ncompatible;
12519 	char			*scsi_binding_set;
12520 	char			t_pwwn[17];
12521 
12522 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12523 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12524 
12525 	scsi_binding_set = "vhci";
12526 
12527 	/* determine the node name and compatible */
12528 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12529 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12530 
12531 	if (nname == NULL) {
12532 #ifdef	DEBUG
12533 		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12534 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12535 		    "	 compatible: %s",
12536 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12537 		    ptgt->tgt_port_wwn.raw_wwn[0],
12538 		    ptgt->tgt_port_wwn.raw_wwn[1],
12539 		    ptgt->tgt_port_wwn.raw_wwn[2],
12540 		    ptgt->tgt_port_wwn.raw_wwn[3],
12541 		    ptgt->tgt_port_wwn.raw_wwn[4],
12542 		    ptgt->tgt_port_wwn.raw_wwn[5],
12543 		    ptgt->tgt_port_wwn.raw_wwn[6],
12544 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12545 		    *compatible);
12546 #endif	/* DEBUG */
12547 		failure++;
12548 		goto end_of_fcp_create_pip;
12549 	}
12550 
12551 	pip = fcp_find_existing_pip(plun, pdip);
12552 
12553 	/*
12554 	 * if the old_dip does not match the cdip, that means there is
12555 	 * some property change. since we'll be using the cdip, we need
12556 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12557 	 * then the dtype for the device has been updated. Offline the
12558 	 * the old device and create a new device with the new device type
12559 	 * Refer to bug: 4764752
12560 	 */
12561 	if (old_pip && (pip != old_pip ||
12562 	    plun->lun_state & FCP_LUN_CHANGED)) {
12563 		plun->lun_state &= ~(FCP_LUN_INIT);
12564 		mutex_exit(&plun->lun_mutex);
12565 		mutex_exit(&pptr->port_mutex);
12566 
12567 		mutex_enter(&ptgt->tgt_mutex);
12568 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12569 		    FCP_OFFLINE, lcount, tcount,
12570 		    NDI_DEVI_REMOVE, 0);
12571 		mutex_exit(&ptgt->tgt_mutex);
12572 
12573 		if (pip != NULL) {
12574 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12575 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12576 			    "Old pip=%p; New pip=%p don't match",
12577 			    old_pip, pip);
12578 		} else {
12579 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12580 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12581 			    "Old pip=%p; New pip=NULL don't match",
12582 			    old_pip);
12583 		}
12584 
12585 		mutex_enter(&pptr->port_mutex);
12586 		mutex_enter(&plun->lun_mutex);
12587 	}
12588 
12589 	/*
12590 	 * Since FC_WWN_SIZE is 8 bytes and its not like the
12591 	 * lun_guid_size which is dependent on the target, I don't
12592 	 * believe the same trancation happens here UNLESS the standards
12593 	 * change the FC_WWN_SIZE value to something larger than
12594 	 * MAXNAMELEN(currently 255 bytes).
12595 	 */
12596 
12597 	for (i = 0; i < FC_WWN_SIZE; i++) {
12598 		(void) sprintf(&buf[i << 1], "%02x",
12599 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12600 	}
12601 
12602 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12603 	    buf, plun->lun_num);
12604 
12605 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12606 		/*
12607 		 * Release the locks before calling into
12608 		 * mdi_pi_alloc_compatible() since this can result in a
12609 		 * callback into fcp which can result in a deadlock
12610 		 * (see bug # 4870272).
12611 		 *
12612 		 * Basically, what we are trying to avoid is the scenario where
12613 		 * one thread does ndi_devi_enter() and tries to grab
12614 		 * fcp_mutex and another does it the other way round.
12615 		 *
12616 		 * But before we do that, make sure that nobody releases the
12617 		 * port in the meantime. We can do this by setting a flag.
12618 		 */
12619 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12620 		pptr->port_state |= FCP_STATE_IN_MDI;
12621 		mutex_exit(&plun->lun_mutex);
12622 		mutex_exit(&pptr->port_mutex);
12623 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12624 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12625 			fcp_log(CE_WARN, pptr->port_dip,
12626 			    "!path alloc failed:0x%x", plun);
12627 			mutex_enter(&pptr->port_mutex);
12628 			mutex_enter(&plun->lun_mutex);
12629 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12630 			failure++;
12631 			goto end_of_fcp_create_pip;
12632 		}
12633 		mutex_enter(&pptr->port_mutex);
12634 		mutex_enter(&plun->lun_mutex);
12635 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12636 	} else {
12637 		(void) mdi_prop_remove(pip, NULL);
12638 	}
12639 
12640 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12641 
12642 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12643 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12644 	    != DDI_PROP_SUCCESS) {
12645 		failure++;
12646 		goto end_of_fcp_create_pip;
12647 	}
12648 
12649 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12650 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12651 	    != DDI_PROP_SUCCESS) {
12652 		failure++;
12653 		goto end_of_fcp_create_pip;
12654 	}
12655 
12656 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12657 	t_pwwn[16] = '\0';
12658 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12659 	    != DDI_PROP_SUCCESS) {
12660 		failure++;
12661 		goto end_of_fcp_create_pip;
12662 	}
12663 
12664 	/*
12665 	 * If there is no hard address - We might have to deal with
12666 	 * that by using WWN - Having said that it is important to
12667 	 * recognize this problem early so ssd can be informed of
12668 	 * the right interconnect type.
12669 	 */
12670 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12671 	    ptgt->tgt_hard_addr != 0) {
12672 		tgt_id = (uint32_t)
12673 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12674 	} else {
12675 		tgt_id = ptgt->tgt_d_id;
12676 	}
12677 
12678 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12679 	    != DDI_PROP_SUCCESS) {
12680 		failure++;
12681 		goto end_of_fcp_create_pip;
12682 	}
12683 
12684 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12685 	    != DDI_PROP_SUCCESS) {
12686 		failure++;
12687 		goto end_of_fcp_create_pip;
12688 	}
12689 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12690 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12691 	    != DDI_PROP_SUCCESS) {
12692 		failure++;
12693 		goto end_of_fcp_create_pip;
12694 	}
12695 
12696 end_of_fcp_create_pip:
12697 	scsi_hba_nodename_compatible_free(nname, compatible);
12698 
12699 	if (pip != NULL && failure) {
12700 		(void) mdi_prop_remove(pip, NULL);
12701 		mutex_exit(&plun->lun_mutex);
12702 		mutex_exit(&pptr->port_mutex);
12703 		(void) mdi_pi_free(pip, 0);
12704 		mutex_enter(&pptr->port_mutex);
12705 		mutex_enter(&plun->lun_mutex);
12706 		pip = NULL;
12707 	}
12708 
12709 	return (pip);
12710 }
12711 
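/*
 *     Function: fcp_find_existing_dip
 *
 *  Description: Walks the children of pdip looking for a devinfo node whose
 *		 node name and node WWN, port WWN, target and LUN properties
 *		 match the LUN passed by the caller.
 *
 *     Argument: plun		LUN structure.
 *		 pdip		Parent devinfo node to search.
 *		 name		Node name to match.
 *
 * Return Value: Address of the matching dev_info_t node, or NULL if no
 *		 match is found.
 *
 *	Context: Kernel context
 */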
12712 static dev_info_t *
12713 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12714 {
12715 	uint_t			nbytes;
12716 	uchar_t			*bytes;
12717 	uint_t			nwords;
12718 	uint32_t		tgt_id;
12719 	int			*words;
12720 	dev_info_t		*cdip;
12721 	dev_info_t		*ndip;
12722 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12723 	struct fcp_port	*pptr = ptgt->tgt_port;
12724 	int			circular;
12725 
12726 	ndi_devi_enter(pdip, &circular);
12727 
12728 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12729 	while ((cdip = ndip) != NULL) {
12730 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12731 
12732 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12733 			continue;
12734 		}
12735 
12736 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12737 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12738 		    &nbytes) != DDI_PROP_SUCCESS) {
12739 			continue;
12740 		}
12741 
12742 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12743 			if (bytes != NULL) {
12744 				ddi_prop_free(bytes);
12745 			}
12746 			continue;
12747 		}
12748 		ASSERT(bytes != NULL);
12749 
12750 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12751 			ddi_prop_free(bytes);
12752 			continue;
12753 		}
12754 
12755 		ddi_prop_free(bytes);
12756 
12757 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12758 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12759 		    &nbytes) != DDI_PROP_SUCCESS) {
12760 			continue;
12761 		}
12762 
12763 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12764 			if (bytes != NULL) {
12765 				ddi_prop_free(bytes);
12766 			}
12767 			continue;
12768 		}
12769 		ASSERT(bytes != NULL);
12770 
12771 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12772 			ddi_prop_free(bytes);
12773 			continue;
12774 		}
12775 
12776 		ddi_prop_free(bytes);
12777 
12778 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12779 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12780 		    &nwords) != DDI_PROP_SUCCESS) {
12781 			continue;
12782 		}
12783 
12784 		if (nwords != 1 || words == NULL) {
12785 			if (words != NULL) {
12786 				ddi_prop_free(words);
12787 			}
12788 			continue;
12789 		}
12790 		ASSERT(words != NULL);
12791 
12792 		/*
12793 		 * If there is no hard address - We might have to deal with
12794 		 * that by using WWN - Having said that it is important to
12795 		 * recognize this problem early so ssd can be informed of
12796 		 * the right interconnect type.
12797 		 */
12798 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12799 		    ptgt->tgt_hard_addr != 0) {
12800 			tgt_id =
12801 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12802 		} else {
12803 			tgt_id = ptgt->tgt_d_id;
12804 		}
12805 
12806 		if (tgt_id != (uint32_t)*words) {
12807 			ddi_prop_free(words);
12808 			continue;
12809 		}
12810 		ddi_prop_free(words);
12811 
12812 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12813 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12814 		    &nwords) != DDI_PROP_SUCCESS) {
12815 			continue;
12816 		}
12817 
12818 		if (nwords != 1 || words == NULL) {
12819 			if (words != NULL) {
12820 				ddi_prop_free(words);
12821 			}
12822 			continue;
12823 		}
12824 		ASSERT(words != NULL);
12825 
12826 		if (plun->lun_num == (uint16_t)*words) {
12827 			ddi_prop_free(words);
12828 			break;
12829 		}
12830 		ddi_prop_free(words);
12831 	}
12832 	ndi_devi_exit(pdip, circular);
12833 
12834 	return (cdip);
12835 }
12836 
12837 
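/*
 * check whether the given mdi_pathinfo node is still the one registered
 * for this LUN's unit address (using the old GUID if one is saved)
 *
 * return FC_SUCCESS if it is, FC_FAILURE otherwise
 *
 * called with the LUN mutex held
 */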
12838 static int
12839 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12840 {
12841 	dev_info_t	*pdip;
12842 	char		buf[MAXNAMELEN];
12843 	char		uaddr[MAXNAMELEN];
12844 	int		rval = FC_FAILURE;
12845 
12846 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12847 
12848 	pdip = plun->lun_tgt->tgt_port->port_dip;
12849 
12850 	/*
12851 	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
12852 	 * non-NULL even when the LUN is not there as in the case when a LUN is
12853 	 * configured and then deleted on the device end (for T3/T4 case). In
12854 	 * such cases, pip will be NULL.
12855 	 *
12856 	 * If the device generates an RSCN, it will end up getting offlined when
12857 	 * it disappeared and a new LUN will get created when it is rediscovered
12858 	 * on the device. If we check for lun_cip here, the LUN will not end
12859 	 * up getting onlined since this function will end up returning a
12860 	 * FC_SUCCESS.
12861 	 *
12862 	 * The behavior is different on other devices. For instance, on a HDS,
12863 	 * there was no RSCN generated by the device but the next I/O generated
12864 	 * a check condition and rediscovery got triggered that way. So, in
12865 	 * such cases, this path will not be exercised
12866 	 */
12867 	if (pip == NULL) {
12868 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12869 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
12870 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
12871 		    "plun: %p lun state: %x num: %d target state: %x",
12872 		    plun, plun->lun_state, plun->lun_num,
12873 		    plun->lun_tgt->tgt_port->port_state);
12874 		return (rval);
12875 	}
12876 
12877 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
12878 
12879 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12880 
12881 	if (plun->lun_old_guid) {
12882 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
12883 			rval = FC_SUCCESS;
12884 		}
12885 	} else {
12886 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
12887 			rval = FC_SUCCESS;
12888 		}
12889 	}
12890 	return (rval);
12891 }
12892 
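/*
 * look up the mdi_pathinfo node of a LUN by building its unit address from
 * the target port WWN and the LUN number and calling mdi_pi_find()
 *
 * return the mdi_pathinfo_t address, or NULL if none exists
 *
 * called with the port mutex held
 */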
12893 static mdi_pathinfo_t *
12894 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
12895 {
12896 	char			buf[MAXNAMELEN];
12897 	char			uaddr[MAXNAMELEN];
12898 	mdi_pathinfo_t		*pip;
12899 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12900 	struct fcp_port	*pptr = ptgt->tgt_port;
12901 
12902 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12903 
12904 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
12905 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12906 
12907 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
12908 
12909 	return (pip);
12910 }
12911 
12912 
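/*
 *     Function: fcp_online_child
 *
 *  Description: Onlines the child node of a LUN: the devinfo node in the
 *		 non-MPxIO case, the pathinfo node otherwise.  If MPxIO
 *		 reports that it does not support the device, the LUN is
 *		 enumerated again in legacy (devinfo) mode.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE
 *
 *	Context: Kernel context.  Called with the port and LUN mutexes
 *		 held; both are dropped and reacquired internally.
 */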
12913 static int
12914 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
12915     int tcount, int flags, int *circ)
12916 {
12917 	int			rval;
12918 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
12919 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12920 	dev_info_t		*cdip = NULL;
12921 
12922 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12923 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12924 
12925 	if (plun->lun_cip == NULL) {
12926 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12927 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12928 		    "fcp_online_child: plun->lun_cip is NULL: "
12929 		    "plun: %p state: %x num: %d target state: %x",
12930 		    plun, plun->lun_state, plun->lun_num,
12931 		    plun->lun_tgt->tgt_port->port_state);
12932 		return (NDI_FAILURE);
12933 	}
12934 again:
12935 	if (plun->lun_mpxio == 0) {
12936 		cdip = DIP(cip);
12937 		mutex_exit(&plun->lun_mutex);
12938 		mutex_exit(&pptr->port_mutex);
12939 
12940 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12941 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12942 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
12943 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12944 
12945 		/*
12946 		 * We could check for FCP_LUN_INIT here but chances
12947 		 * of getting here when it's already in FCP_LUN_INIT
12948 		 * is rare and a duplicate ndi_devi_online wouldn't
12949 		 * hurt either (as the node would already have been
12950 		 * in CF2)
12951 		 */
12952 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
12953 			rval = ndi_devi_bind_driver(cdip, flags);
12954 		} else {
12955 			rval = ndi_devi_online(cdip, flags);
12956 		}
12957 		/*
12958 		 * We log the message into trace buffer if the device
12959 		 * is "ses" and into syslog for any other device
12960 		 * type. This is to prevent the ndi_devi_online failure
12961 		 * message that appears for V880/A5K ses devices.
12962 		 */
12963 		if (rval == NDI_SUCCESS) {
12964 			mutex_enter(&ptgt->tgt_mutex);
12965 			plun->lun_state |= FCP_LUN_INIT;
12966 			mutex_exit(&ptgt->tgt_mutex);
12967 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
12968 			fcp_log(CE_NOTE, pptr->port_dip,
12969 			    "!ndi_devi_online:"
12970 			    " failed for %s: target=%x lun=%x %x",
12971 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12972 			    plun->lun_num, rval);
12973 		} else {
12974 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12975 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
12976 			    " !ndi_devi_online:"
12977 			    " failed for %s: target=%x lun=%x %x",
12978 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12979 			    plun->lun_num, rval);
12980 		}
12981 	} else {
12982 		cdip = mdi_pi_get_client(PIP(cip));
12983 		mutex_exit(&plun->lun_mutex);
12984 		mutex_exit(&pptr->port_mutex);
12985 
12986 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12987 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12988 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
12989 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12990 
12991 		/*
12992 		 * Hold path and exit phci to avoid deadlock with power
12993 		 * management code during mdi_pi_online.
12994 		 */
12995 		mdi_hold_path(PIP(cip));
12996 		mdi_devi_exit_phci(pptr->port_dip, *circ);
12997 
12998 		rval = mdi_pi_online(PIP(cip), flags);
12999 
13000 		mdi_devi_enter_phci(pptr->port_dip, circ);
13001 		mdi_rele_path(PIP(cip));
13002 
13003 		if (rval == MDI_SUCCESS) {
13004 			mutex_enter(&ptgt->tgt_mutex);
13005 			plun->lun_state |= FCP_LUN_INIT;
13006 			mutex_exit(&ptgt->tgt_mutex);
13007 
13008 			/*
13009 			 * Clear MPxIO path permanent disable in case
13010 			 * fcp hotplug dropped the offline event.
13011 			 */
13012 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13013 
13014 		} else if (rval == MDI_NOT_SUPPORTED) {
13015 			child_info_t	*old_cip = cip;
13016 
13017 			/*
13018 			 * MPxIO does not support this device yet.
13019 			 * Enumerate in legacy mode.
13020 			 */
13021 			mutex_enter(&pptr->port_mutex);
13022 			mutex_enter(&plun->lun_mutex);
13023 			plun->lun_mpxio = 0;
13024 			plun->lun_cip = NULL;
13025 			cdip = fcp_create_dip(plun, lcount, tcount);
13026 			plun->lun_cip = cip = CIP(cdip);
13027 			if (cip == NULL) {
13028 				fcp_log(CE_WARN, pptr->port_dip,
13029 				    "!fcp_online_child: "
13030 				    "Create devinfo failed for LU=%p", plun);
13031 				mutex_exit(&plun->lun_mutex);
13032 
13033 				mutex_enter(&ptgt->tgt_mutex);
13034 				plun->lun_state |= FCP_LUN_OFFLINE;
13035 				mutex_exit(&ptgt->tgt_mutex);
13036 
13037 				mutex_exit(&pptr->port_mutex);
13038 
13039 				/*
13040 				 * free the mdi_pathinfo node
13041 				 */
13042 				(void) mdi_pi_free(PIP(old_cip), 0);
13043 			} else {
13044 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13045 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13046 				    "fcp_online_child: creating devinfo "
13047 				    "node 0x%p for plun 0x%p",
13048 				    cip, plun);
13049 				mutex_exit(&plun->lun_mutex);
13050 				mutex_exit(&pptr->port_mutex);
13051 				/*
13052 				 * free the mdi_pathinfo node
13053 				 */
13054 				(void) mdi_pi_free(PIP(old_cip), 0);
13055 				mutex_enter(&pptr->port_mutex);
13056 				mutex_enter(&plun->lun_mutex);
13057 				goto again;
13058 			}
13059 		} else {
13060 			if (cdip) {
13061 				fcp_log(CE_NOTE, pptr->port_dip,
13062 				    "!fcp_online_child: mdi_pi_online:"
13063 				    " failed for %s: target=%x lun=%x %x",
13064 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13065 				    plun->lun_num, rval);
13066 			}
13067 		}
13068 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13069 	}
13070 
13071 	if (rval == NDI_SUCCESS) {
13072 		if (cdip) {
13073 			(void) ndi_event_retrieve_cookie(
13074 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13075 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13076 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13077 			    cdip, fcp_insert_eid, NULL);
13078 		}
13079 	}
13080 	mutex_enter(&pptr->port_mutex);
13081 	mutex_enter(&plun->lun_mutex);
13082 	return (rval);
13083 }
13084 
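/*
 *     Function: fcp_offline_child
 *
 *  Description: Offlines the child node of a LUN: the devinfo node in the
 *		 non-MPxIO case, the pathinfo node otherwise.  With
 *		 NDI_DEVI_REMOVE the pathinfo node is also freed and the
 *		 LUN's reference to the child node is cleared.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE
 *
 *	Context: Kernel context.  Called with the port and LUN mutexes
 *		 held; both are dropped and reacquired internally.
 */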
13085 /* ARGSUSED */
13086 static int
13087 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13088     int tcount, int flags, int *circ)
13089 {
13090 	int rval;
13091 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13092 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13093 	dev_info_t		*cdip;
13094 
13095 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13096 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13097 
13098 	if (plun->lun_cip == NULL) {
13099 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13100 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13101 		    "fcp_offline_child: plun->lun_cip is NULL: "
13102 		    "plun: %p lun state: %x num: %d target state: %x",
13103 		    plun, plun->lun_state, plun->lun_num,
13104 		    plun->lun_tgt->tgt_port->port_state);
13105 		return (NDI_FAILURE);
13106 	}
13107 
13108 	if (plun->lun_mpxio == 0) {
13109 		cdip = DIP(cip);
13110 		mutex_exit(&plun->lun_mutex);
13111 		mutex_exit(&pptr->port_mutex);
13112 		rval = ndi_devi_offline(DIP(cip), flags);
13113 		if (rval != NDI_SUCCESS) {
13114 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13115 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13116 			    "fcp_offline_child: ndi_devi_offline failed "
13117 			    "rval=%x cip=%p", rval, cip);
13118 		}
13119 	} else {
13120 		cdip = mdi_pi_get_client(PIP(cip));
13121 		mutex_exit(&plun->lun_mutex);
13122 		mutex_exit(&pptr->port_mutex);
13123 
13124 		/*
13125 		 * Exit phci to avoid deadlock with power management code
13126 		 * during mdi_pi_offline
13127 		 */
13128 		mdi_hold_path(PIP(cip));
13129 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13130 
13131 		rval = mdi_pi_offline(PIP(cip), flags);
13132 
13133 		mdi_devi_enter_phci(pptr->port_dip, circ);
13134 		mdi_rele_path(PIP(cip));
13135 
13136 		if (rval == MDI_SUCCESS) {
13137 			/*
13138 			 * Clear MPxIO path permanent disable as the path is
13139 			 * already offlined.
13140 			 */
13141 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13142 
13143 			if (flags & NDI_DEVI_REMOVE) {
13144 				(void) mdi_pi_free(PIP(cip), 0);
13145 			}
13146 		} else {
13147 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13148 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13149 			    "fcp_offline_child: mdi_pi_offline failed "
13150 			    "rval=%x cip=%p", rval, cip);
13151 		}
13152 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13153 	}
13154 
13155 	mutex_enter(&ptgt->tgt_mutex);
13156 	plun->lun_state &= ~FCP_LUN_INIT;
13157 	mutex_exit(&ptgt->tgt_mutex);
13158 
13159 	mutex_enter(&pptr->port_mutex);
13160 	mutex_enter(&plun->lun_mutex);
13161 
13162 	if (rval == NDI_SUCCESS) {
13163 		cdip = NULL;
13164 		if (flags & NDI_DEVI_REMOVE) {
13165 			/*
13166 			 * If the guid of the LUN changes, lun_cip will not
13167 			 * equal to cip, and after offlining the LUN with the
13168 			 * old guid, we should keep lun_cip since it's the cip
13169 			 * of the LUN with the new guid.
13170 			 * Otherwise remove our reference to child node.
13171 			 */
13172 			if (plun->lun_cip == cip) {
13173 				plun->lun_cip = NULL;
13174 			}
13175 			if (plun->lun_old_guid) {
13176 				kmem_free(plun->lun_old_guid,
13177 				    plun->lun_old_guid_size);
13178 				plun->lun_old_guid = NULL;
13179 				plun->lun_old_guid_size = 0;
13180 			}
13181 		}
13182 	}
13183 
13184 	if (cdip) {
13185 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13186 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13187 		    " target=%x lun=%x", "ndi_offline",
13188 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13189 	}
13190 
13191 	return (rval);
13192 }
13193 
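/*
 * free the child node (devinfo or pathinfo) bound to the LUN, if it is
 * still present, and clear lun_cip
 *
 * called with the port, target and LUN mutexes held; in the MPxIO case
 * they are dropped and reacquired around the mdi calls
 */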
13194 static void
13195 fcp_remove_child(struct fcp_lun *plun)
13196 {
13197 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13198 
13199 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13200 		if (plun->lun_mpxio == 0) {
13201 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13202 			(void) ndi_devi_free(DIP(plun->lun_cip));
13203 		} else {
13204 			mutex_exit(&plun->lun_mutex);
13205 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13206 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13207 			FCP_TRACE(fcp_logq,
13208 			    plun->lun_tgt->tgt_port->port_instbuf,
13209 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13210 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13211 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13212 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13213 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13214 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13215 			mutex_enter(&plun->lun_mutex);
13216 		}
13217 	}
13218 
13219 	plun->lun_cip = NULL;
13220 }
13221 
13222 /*
13223  * called when a timeout occurs
13224  *
13225  * can be scheduled during an attach or resume (if not already running)
13226  *
13227  * one timeout is set up for all ports
13228  *
13229  * acquires and releases the global mutex
13230  */
13231 /*ARGSUSED*/
13232 static void
13233 fcp_watch(void *arg)
13234 {
13235 	struct fcp_port	*pptr;
13236 	struct fcp_ipkt	*icmd;
13237 	struct fcp_ipkt	*nicmd;
13238 	struct fcp_pkt	*cmd;
13239 	struct fcp_pkt	*ncmd;
13240 	struct fcp_pkt	*tail;
13241 	struct fcp_pkt	*pcmd;
13242 	struct fcp_pkt	*save_head;
13243 	struct fcp_port	*save_port;
13244 
13245 	/* increment global watchdog time */
13246 	fcp_watchdog_time += fcp_watchdog_timeout;
13247 
13248 	mutex_enter(&fcp_global_mutex);
13249 
13250 	/* scan each port in our list */
13251 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13252 		save_port = fcp_port_head;
13253 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13254 		mutex_exit(&fcp_global_mutex);
13255 
13256 		mutex_enter(&pptr->port_mutex);
13257 		if (pptr->port_ipkt_list == NULL &&
13258 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13259 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13260 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13261 			mutex_exit(&pptr->port_mutex);
13262 			mutex_enter(&fcp_global_mutex);
13263 			goto end_of_watchdog;
13264 		}
13265 
13266 		/*
13267 		 * We check if a list of targets need to be offlined.
13268 		 */
13269 		if (pptr->port_offline_tgts) {
13270 			fcp_scan_offline_tgts(pptr);
13271 		}
13272 
13273 		/*
13274 		 * We check if a list of luns need to be offlined.
13275 		 */
13276 		if (pptr->port_offline_luns) {
13277 			fcp_scan_offline_luns(pptr);
13278 		}
13279 
13280 		/*
13281 		 * We check if a list of targets or luns need to be reset.
13282 		 */
13283 		if (pptr->port_reset_list) {
13284 			fcp_check_reset_delay(pptr);
13285 		}
13286 
13287 		mutex_exit(&pptr->port_mutex);
13288 
13289 		/*
13290 		 * This is where the pending commands (pkt) are checked for
13291 		 * timeout.
13292 		 */
13293 		mutex_enter(&pptr->port_pkt_mutex);
13294 		tail = pptr->port_pkt_tail;
13295 
13296 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13297 		    cmd != NULL; cmd = ncmd) {
13298 			ncmd = cmd->cmd_next;
13299 			/*
13300 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13301 			 * must be set.
13302 			 */
13303 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13304 			/*
13305 			 * FCP_INVALID_TIMEOUT will be set for those
13306 			 * command that need to be failed. Mostly those
13307 			 * cmds that could not be queued down for the
13308 			 * "timeout" value. cmd->cmd_timeout is used
13309 			 * to try and requeue the command regularly.
13310 			 */
13311 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13312 				/*
13313 				 * This command hasn't timed out yet.  Let's
13314 				 * go to the next one.
13315 				 */
13316 				pcmd = cmd;
13317 				goto end_of_loop;
13318 			}
13319 
13320 			if (cmd == pptr->port_pkt_head) {
13321 				ASSERT(pcmd == NULL);
13322 				pptr->port_pkt_head = cmd->cmd_next;
13323 			} else {
13324 				ASSERT(pcmd != NULL);
13325 				pcmd->cmd_next = cmd->cmd_next;
13326 			}
13327 
13328 			if (cmd == pptr->port_pkt_tail) {
13329 				ASSERT(cmd->cmd_next == NULL);
13330 				pptr->port_pkt_tail = pcmd;
13331 				if (pcmd) {
13332 					pcmd->cmd_next = NULL;
13333 				}
13334 			}
13335 			cmd->cmd_next = NULL;
13336 
13337 			/*
13338 			 * save the current head before dropping the
13339 			 * mutex - If the head doesn't remain the
13340 			 * same after re acquiring the mutex, just
13341 			 * bail out and revisit on next tick.
13342 			 *
13343 			 * PS: The tail pointer can change as the commands
13344 			 * get requeued after failure to retransport
13345 			 */
13346 			save_head = pptr->port_pkt_head;
13347 			mutex_exit(&pptr->port_pkt_mutex);
13348 
13349 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13350 			    FCP_INVALID_TIMEOUT) {
13351 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13352 				struct fcp_lun	*plun;
13353 				struct fcp_tgt	*ptgt;
13354 
13355 				plun = ADDR2LUN(&pkt->pkt_address);
13356 				ptgt = plun->lun_tgt;
13357 
13358 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13359 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13360 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13361 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13362 
				if (cmd->cmd_state == FCP_PKT_ABORTING) {
					fcp_fail_cmd(cmd, CMD_RESET,
					    STAT_DEV_RESET);
				} else {
					fcp_fail_cmd(cmd, CMD_TIMEOUT,
					    STAT_ABORTED);
				}
13367 			} else {
13368 				fcp_retransport_cmd(pptr, cmd);
13369 			}
13370 			mutex_enter(&pptr->port_pkt_mutex);
13371 			if (save_head && save_head != pptr->port_pkt_head) {
13372 				/*
13373 				 * Looks like linked list got changed (mostly
13374 				 * happens when an an OFFLINE LUN code starts
13375 				 * returning overflow queue commands in
13376 				 * parallel. So bail out and revisit during
13377 				 * next tick
13378 				 */
13379 				break;
13380 			}
13381 		end_of_loop:
13382 			/*
13383 			 * Scan only upto the previously known tail pointer
13384 			 * to avoid excessive processing - lots of new packets
13385 			 * could have been added to the tail or the old ones
13386 			 * re-queued.
13387 			 */
13388 			if (cmd == tail) {
13389 				break;
13390 			}
13391 		}
13392 		mutex_exit(&pptr->port_pkt_mutex);
13393 
13394 		mutex_enter(&pptr->port_mutex);
13395 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13396 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13397 
13398 			nicmd = icmd->ipkt_next;
13399 			if ((icmd->ipkt_restart != 0) &&
13400 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13401 				/* packet has not timed out */
13402 				continue;
13403 			}
13404 
13405 			/* time for packet re-transport */
13406 			if (icmd == pptr->port_ipkt_list) {
13407 				pptr->port_ipkt_list = icmd->ipkt_next;
13408 				if (pptr->port_ipkt_list) {
13409 					pptr->port_ipkt_list->ipkt_prev =
13410 					    NULL;
13411 				}
13412 			} else {
13413 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13414 				if (icmd->ipkt_next) {
13415 					icmd->ipkt_next->ipkt_prev =
13416 					    icmd->ipkt_prev;
13417 				}
13418 			}
13419 			icmd->ipkt_next = NULL;
13420 			icmd->ipkt_prev = NULL;
13421 			mutex_exit(&pptr->port_mutex);
13422 
13423 			if (fcp_is_retryable(icmd)) {
13424 				fc_ulp_rscn_info_t *rscnp =
13425 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13426 				    pkt_ulp_rscn_infop;
13427 
13428 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13429 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13430 				    "%x to D_ID=%x Retrying..",
13431 				    icmd->ipkt_opcode,
13432 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13433 
13434 				/*
13435 				 * Update the RSCN count in the packet
13436 				 * before resending.
13437 				 */
13438 
13439 				if (rscnp != NULL) {
13440 					rscnp->ulp_rscn_count =
13441 					    fc_ulp_get_rscn_count(pptr->
13442 					    port_fp_handle);
13443 				}
13444 
13445 				mutex_enter(&pptr->port_mutex);
13446 				mutex_enter(&ptgt->tgt_mutex);
13447 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13448 					mutex_exit(&ptgt->tgt_mutex);
13449 					mutex_exit(&pptr->port_mutex);
13450 					switch (icmd->ipkt_opcode) {
13451 						int rval;
13452 					case LA_ELS_PLOGI:
13453 						if ((rval = fc_ulp_login(
13454 						    pptr->port_fp_handle,
13455 						    &icmd->ipkt_fpkt, 1)) ==
13456 						    FC_SUCCESS) {
13457 							mutex_enter(
13458 							    &pptr->port_mutex);
13459 							continue;
13460 						}
13461 						if (fcp_handle_ipkt_errors(
13462 						    pptr, ptgt, icmd, rval,
13463 						    "PLOGI") == DDI_SUCCESS) {
13464 							mutex_enter(
13465 							    &pptr->port_mutex);
13466 							continue;
13467 						}
13468 						break;
13469 
13470 					case LA_ELS_PRLI:
13471 						if ((rval = fc_ulp_issue_els(
13472 						    pptr->port_fp_handle,
13473 						    icmd->ipkt_fpkt)) ==
13474 						    FC_SUCCESS) {
13475 							mutex_enter(
13476 							    &pptr->port_mutex);
13477 							continue;
13478 						}
13479 						if (fcp_handle_ipkt_errors(
13480 						    pptr, ptgt, icmd, rval,
13481 						    "PRLI") == DDI_SUCCESS) {
13482 							mutex_enter(
13483 							    &pptr->port_mutex);
13484 							continue;
13485 						}
13486 						break;
13487 
13488 					default:
13489 						if ((rval = fcp_transport(
13490 						    pptr->port_fp_handle,
13491 						    icmd->ipkt_fpkt, 1)) ==
13492 						    FC_SUCCESS) {
13493 							mutex_enter(
13494 							    &pptr->port_mutex);
13495 							continue;
13496 						}
13497 						if (fcp_handle_ipkt_errors(
13498 						    pptr, ptgt, icmd, rval,
13499 						    "PRLI") == DDI_SUCCESS) {
13500 							mutex_enter(
13501 							    &pptr->port_mutex);
13502 							continue;
13503 						}
13504 						break;
13505 					}
13506 				} else {
13507 					mutex_exit(&ptgt->tgt_mutex);
13508 					mutex_exit(&pptr->port_mutex);
13509 				}
13510 			} else {
13511 				fcp_print_error(icmd->ipkt_fpkt);
13512 			}
13513 
13514 			(void) fcp_call_finish_init(pptr, ptgt,
13515 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13516 			    icmd->ipkt_cause);
13517 			fcp_icmd_free(pptr, icmd);
13518 			mutex_enter(&pptr->port_mutex);
13519 		}
13520 
13521 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13522 		mutex_exit(&pptr->port_mutex);
13523 		mutex_enter(&fcp_global_mutex);
13524 
13525 	end_of_watchdog:
13526 		/*
13527 		 * Bail out early before getting into trouble
13528 		 */
13529 		if (save_port != fcp_port_head) {
13530 			break;
13531 		}
13532 	}
13533 
13534 	if (fcp_watchdog_init > 0) {
13535 		/* reschedule timeout to go again */
13536 		fcp_watchdog_id =
13537 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13538 	}
13539 	mutex_exit(&fcp_global_mutex);
13540 }
13541 
13542 
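/*
 * scan the port's list of reset delay elements; for each element whose
 * delay has expired, remove it from the list, clear the FCP_LUN_BUSY state
 * on the target or LUN and abort the commands still queued for it
 *
 * called with the port mutex held; the mutex is dropped and reacquired
 * around fcp_abort_all()
 */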
13543 static void
13544 fcp_check_reset_delay(struct fcp_port *pptr)
13545 {
13546 	uint32_t		tgt_cnt;
13547 	int			level;
13548 	struct fcp_tgt	*ptgt;
13549 	struct fcp_lun	*plun;
13550 	struct fcp_reset_elem *cur = NULL;
13551 	struct fcp_reset_elem *next = NULL;
13552 	struct fcp_reset_elem *prev = NULL;
13553 
13554 	ASSERT(mutex_owned(&pptr->port_mutex));
13555 
13556 	next = pptr->port_reset_list;
13557 	while ((cur = next) != NULL) {
13558 		next = cur->next;
13559 
13560 		if (cur->timeout < fcp_watchdog_time) {
13561 			prev = cur;
13562 			continue;
13563 		}
13564 
13565 		ptgt = cur->tgt;
13566 		plun = cur->lun;
13567 		tgt_cnt = cur->tgt_cnt;
13568 
13569 		if (ptgt) {
13570 			level = RESET_TARGET;
13571 		} else {
13572 			ASSERT(plun != NULL);
13573 			level = RESET_LUN;
13574 			ptgt = plun->lun_tgt;
13575 		}
13576 		if (prev) {
13577 			prev->next = next;
13578 		} else {
13579 			/*
13580 			 * Because we drop port mutex while doing aborts for
13581 			 * packets, we can't rely on reset_list pointing to
13582 			 * our head
13583 			 */
13584 			if (cur == pptr->port_reset_list) {
13585 				pptr->port_reset_list = next;
13586 			} else {
13587 				struct fcp_reset_elem *which;
13588 
13589 				which = pptr->port_reset_list;
13590 				while (which && which->next != cur) {
13591 					which = which->next;
13592 				}
13593 				ASSERT(which != NULL);
13594 
13595 				which->next = next;
13596 				prev = which;
13597 			}
13598 		}
13599 
13600 		kmem_free(cur, sizeof (*cur));
13601 
13602 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13603 			mutex_enter(&ptgt->tgt_mutex);
13604 			if (level == RESET_TARGET) {
13605 				fcp_update_tgt_state(ptgt,
13606 				    FCP_RESET, FCP_LUN_BUSY);
13607 			} else {
13608 				fcp_update_lun_state(plun,
13609 				    FCP_RESET, FCP_LUN_BUSY);
13610 			}
13611 			mutex_exit(&ptgt->tgt_mutex);
13612 
13613 			mutex_exit(&pptr->port_mutex);
13614 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13615 			mutex_enter(&pptr->port_mutex);
13616 		}
13617 	}
13618 }
13619 
13620 
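/*
 *     Function: fcp_abort_all
 *
 *  Description: Called after a target or LUN reset.  Completes, with
 *		 CMD_RESET, the commands queued on the port overflow queue
 *		 for that target or LUN and, unless the FCA returns all of
 *		 its queued commands itself, attempts to abort the commands
 *		 still outstanding at the FCA.  Commands that cannot be
 *		 aborted are placed on the overflow queue to be timed out by
 *		 the watch thread.
 *
 *	Context: Kernel context
 */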
13621 static void
13622 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13623     struct fcp_lun *rlun, int tgt_cnt)
13624 {
13625 	int			rval;
13626 	struct fcp_lun	*tlun, *nlun;
13627 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13628 	    *cmd = NULL, *head = NULL,
13629 	    *tail = NULL;
13630 
13631 	mutex_enter(&pptr->port_pkt_mutex);
13632 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13633 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13634 		struct fcp_tgt *ptgt = plun->lun_tgt;
13635 
13636 		ncmd = cmd->cmd_next;
13637 
13638 		if (ptgt != ttgt && plun != rlun) {
13639 			pcmd = cmd;
13640 			continue;
13641 		}
13642 
13643 		if (pcmd != NULL) {
13644 			ASSERT(pptr->port_pkt_head != cmd);
13645 			pcmd->cmd_next = ncmd;
13646 		} else {
13647 			ASSERT(cmd == pptr->port_pkt_head);
13648 			pptr->port_pkt_head = ncmd;
13649 		}
13650 		if (pptr->port_pkt_tail == cmd) {
13651 			ASSERT(cmd->cmd_next == NULL);
13652 			pptr->port_pkt_tail = pcmd;
13653 			if (pcmd != NULL) {
13654 				pcmd->cmd_next = NULL;
13655 			}
13656 		}
13657 
13658 		if (head == NULL) {
13659 			head = tail = cmd;
13660 		} else {
13661 			ASSERT(tail != NULL);
13662 			tail->cmd_next = cmd;
13663 			tail = cmd;
13664 		}
13665 		cmd->cmd_next = NULL;
13666 	}
13667 	mutex_exit(&pptr->port_pkt_mutex);
13668 
13669 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13670 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13671 
13672 		ncmd = cmd->cmd_next;
13673 		ASSERT(pkt != NULL);
13674 
13675 		mutex_enter(&pptr->port_mutex);
13676 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13677 			mutex_exit(&pptr->port_mutex);
13678 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13679 			pkt->pkt_reason = CMD_RESET;
13680 			pkt->pkt_statistics |= STAT_DEV_RESET;
13681 			cmd->cmd_state = FCP_PKT_IDLE;
13682 			fcp_post_callback(cmd);
13683 		} else {
13684 			mutex_exit(&pptr->port_mutex);
13685 		}
13686 	}
13687 
13688 	/*
13689 	 * If the FCA will return all the commands in its queue then our
13690 	 * work is easy, just return.
13691 	 */
13692 
13693 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13694 		return;
13695 	}
13696 
13697 	/*
13698 	 * For RESET_LUN get hold of target pointer
13699 	 */
13700 	if (ttgt == NULL) {
13701 		ASSERT(rlun != NULL);
13702 
13703 		ttgt = rlun->lun_tgt;
13704 
13705 		ASSERT(ttgt != NULL);
13706 	}
13707 
13708 	/*
13709 	 * There are some severe race conditions here.
13710 	 * While we are trying to abort the pkt, it might be completing
13711 	 * so mark it aborted and if the abort does not succeed then
13712 	 * handle it in the watch thread.
13713 	 */
13714 	mutex_enter(&ttgt->tgt_mutex);
13715 	nlun = ttgt->tgt_lun;
13716 	mutex_exit(&ttgt->tgt_mutex);
13717 	while ((tlun = nlun) != NULL) {
13718 		int restart = 0;
13719 		if (rlun && rlun != tlun) {
13720 			mutex_enter(&ttgt->tgt_mutex);
13721 			nlun = tlun->lun_next;
13722 			mutex_exit(&ttgt->tgt_mutex);
13723 			continue;
13724 		}
13725 		mutex_enter(&tlun->lun_mutex);
13726 		cmd = tlun->lun_pkt_head;
13727 		while (cmd != NULL) {
13728 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13729 				struct scsi_pkt *pkt;
13730 
13731 				restart = 1;
13732 				cmd->cmd_state = FCP_PKT_ABORTING;
13733 				mutex_exit(&tlun->lun_mutex);
13734 				rval = fc_ulp_abort(pptr->port_fp_handle,
13735 				    cmd->cmd_fp_pkt, KM_SLEEP);
13736 				if (rval == FC_SUCCESS) {
13737 					pkt = cmd->cmd_pkt;
13738 					pkt->pkt_reason = CMD_RESET;
13739 					pkt->pkt_statistics |= STAT_DEV_RESET;
13740 					cmd->cmd_state = FCP_PKT_IDLE;
13741 					fcp_post_callback(cmd);
13742 				} else {
13743 					caddr_t msg;
13744 
13745 					(void) fc_ulp_error(rval, &msg);
13746 
13747 					/*
13748 					 * This part is tricky. The abort
13749 					 * failed and now the command could
13750 					 * be completing.  The cmd_state ==
13751 					 * FCP_PKT_ABORTING should save
13752 					 * us in fcp_cmd_callback. If we
13753 					 * are already aborting ignore the
13754 					 * command in fcp_cmd_callback.
13755 					 * Here we leave this packet for 20
13756 					 * sec to be aborted in the
13757 					 * fcp_watch thread.
13758 					 */
13759 					fcp_log(CE_WARN, pptr->port_dip,
13760 					    "!Abort failed after reset %s",
13761 					    msg);
13762 
13763 					cmd->cmd_timeout =
13764 					    fcp_watchdog_time +
13765 					    cmd->cmd_pkt->pkt_time +
13766 					    FCP_FAILED_DELAY;
13767 
13768 					cmd->cmd_fp_pkt->pkt_timeout =
13769 					    FCP_INVALID_TIMEOUT;
13770 					/*
13771 					 * This is a hack, cmd is put in the
13772 					 * overflow queue so that it can be
13773 					 * timed out finally
13774 					 */
13775 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
13776 
13777 					mutex_enter(&pptr->port_pkt_mutex);
13778 					if (pptr->port_pkt_head) {
13779 						ASSERT(pptr->port_pkt_tail
13780 						    != NULL);
13781 						pptr->port_pkt_tail->cmd_next
13782 						    = cmd;
13783 						pptr->port_pkt_tail = cmd;
13784 					} else {
13785 						ASSERT(pptr->port_pkt_tail
13786 						    == NULL);
13787 						pptr->port_pkt_head =
13788 						    pptr->port_pkt_tail
13789 						    = cmd;
13790 					}
13791 					cmd->cmd_next = NULL;
13792 					mutex_exit(&pptr->port_pkt_mutex);
13793 				}
13794 				mutex_enter(&tlun->lun_mutex);
13795 				cmd = tlun->lun_pkt_head;
13796 			} else {
13797 				cmd = cmd->cmd_forw;
13798 			}
13799 		}
13800 		mutex_exit(&tlun->lun_mutex);
13801 
13802 		mutex_enter(&ttgt->tgt_mutex);
		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
13804 		mutex_exit(&ttgt->tgt_mutex);
13805 
13806 		mutex_enter(&pptr->port_mutex);
13807 		if (tgt_cnt != ttgt->tgt_change_cnt) {
13808 			mutex_exit(&pptr->port_mutex);
13809 			return;
13810 		} else {
13811 			mutex_exit(&pptr->port_mutex);
13812 		}
13813 	}
13814 }
13815 
13816 
13817 /*
13818  * unlink the soft state, returning the soft state found (if any)
13819  *
13820  * acquires and releases the global mutex
13821  */
13822 struct fcp_port *
13823 fcp_soft_state_unlink(struct fcp_port *pptr)
13824 {
13825 	struct fcp_port	*hptr;		/* ptr index */
13826 	struct fcp_port	*tptr;		/* prev hptr */
13827 
13828 	mutex_enter(&fcp_global_mutex);
13829 	for (hptr = fcp_port_head, tptr = NULL;
13830 	    hptr != NULL;
13831 	    tptr = hptr, hptr = hptr->port_next) {
13832 		if (hptr == pptr) {
13833 			/* we found a match -- remove this item */
13834 			if (tptr == NULL) {
13835 				/* we're at the head of the list */
13836 				fcp_port_head = hptr->port_next;
13837 			} else {
13838 				tptr->port_next = hptr->port_next;
13839 			}
13840 			break;			/* success */
13841 		}
13842 	}
13843 	if (fcp_port_head == NULL) {
13844 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
13845 	}
13846 	mutex_exit(&fcp_global_mutex);
13847 	return (hptr);
13848 }
13849 
13850 
13851 /*
13852  * called by fcp_scsi_hba_tgt_init to find a LUN given a
13853  * WWN and a LUN number
13854  */
13855 /* ARGSUSED */
13856 static struct fcp_lun *
13857 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
13858 {
13859 	int hash;
13860 	struct fcp_tgt *ptgt;
13861 	struct fcp_lun *plun;
13862 
13863 	ASSERT(mutex_owned(&pptr->port_mutex));
13864 
13865 	hash = FCP_HASH(wwn);
13866 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
13867 	    ptgt = ptgt->tgt_next) {
13868 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
13869 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
13870 			mutex_enter(&ptgt->tgt_mutex);
13871 			for (plun = ptgt->tgt_lun;
13872 			    plun != NULL;
13873 			    plun = plun->lun_next) {
13874 				if (plun->lun_num == lun) {
13875 					mutex_exit(&ptgt->tgt_mutex);
13876 					return (plun);
13877 				}
13878 			}
13879 			mutex_exit(&ptgt->tgt_mutex);
13880 			return (NULL);
13881 		}
13882 	}
13883 	return (NULL);
13884 }
13885 
13886 /*
13887  *     Function: fcp_prepare_pkt
13888  *
13889  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
13890  *		 for fcp_start(). It binds the data or partially maps it.
13891  *		 Builds the FCP header and starts the initialization of the
13892  *		 Fibre Channel header.
13893  *
13894  *     Argument: *pptr		FCP port.
13895  *		 *cmd		FCP packet.
13896  *		 *plun		LUN the command will be sent to.
13897  *
13898  *	Context: User, Kernel and Interrupt context.
13899  */
13900 static void
13901 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
13902     struct fcp_lun *plun)
13903 {
13904 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
13905 	struct fcp_tgt		*ptgt = plun->lun_tgt;
13906 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
13907 
13908 	ASSERT(cmd->cmd_pkt->pkt_comp ||
13909 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
13910 
13911 	if (cmd->cmd_pkt->pkt_numcookies) {
13912 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
13913 			fcmd->fcp_cntl.cntl_read_data = 1;
13914 			fcmd->fcp_cntl.cntl_write_data = 0;
13915 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
13916 		} else {
13917 			fcmd->fcp_cntl.cntl_read_data = 0;
13918 			fcmd->fcp_cntl.cntl_write_data = 1;
13919 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
13920 		}
13921 
13922 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
13923 
13924 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
13925 		ASSERT(fpkt->pkt_data_cookie_cnt <=
13926 		    pptr->port_data_dma_attr.dma_attr_sgllen);
13927 
13928 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
13929 
13930 		/* FCA needs pkt_datalen to be set */
13931 		fpkt->pkt_datalen = cmd->cmd_dmacount;
13932 		fcmd->fcp_data_len = cmd->cmd_dmacount;
13933 	} else {
13934 		fcmd->fcp_cntl.cntl_read_data = 0;
13935 		fcmd->fcp_cntl.cntl_write_data = 0;
13936 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
13937 		fpkt->pkt_datalen = 0;
13938 		fcmd->fcp_data_len = 0;
13939 	}
13940 
13941 	/* set up the Tagged Queuing type */
13942 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
13943 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
13944 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
13945 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
13946 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
13947 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
13948 	} else {
13949 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
13950 	}
13951 
13952 	fcmd->fcp_ent_addr = plun->lun_addr;
13953 
13954 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
13955 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
13956 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
13957 	} else {
13958 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
13959 	}
13960 
13961 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
13962 	cmd->cmd_pkt->pkt_state = 0;
13963 	cmd->cmd_pkt->pkt_statistics = 0;
13964 	cmd->cmd_pkt->pkt_resid = 0;
13965 
13966 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
13967 
13968 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
13969 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
13970 		fpkt->pkt_comp = NULL;
13971 	} else {
13972 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
13973 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
13974 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
13975 		}
13976 		fpkt->pkt_comp = fcp_cmd_callback;
13977 	}
13978 
13979 	mutex_enter(&pptr->port_mutex);
13980 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
13981 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
13982 	}
13983 	mutex_exit(&pptr->port_mutex);
13984 
13985 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
13986 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
13987 
13988 	/*
13989 	 * Save a few kernel cycles here
13990 	 */
13991 #ifndef	__lock_lint
13992 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
13993 #endif /* __lock_lint */
13994 }
13995 
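/*
 * complete the SCSI packet of a command by invoking its completion routine
 * through scsi_hba_pkt_comp()
 */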
13996 static void
13997 fcp_post_callback(struct fcp_pkt *cmd)
13998 {
13999 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14000 }
14001 
14002 
14003 /*
14004  * called to do polled I/O by fcp_start()
14005  *
 * return a transport status value, i.e. TRAN_ACCEPT for success
14007  */
14008 static int
14009 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14010 {
14011 	int	rval;
14012 
14013 #ifdef	DEBUG
14014 	mutex_enter(&pptr->port_pkt_mutex);
14015 	pptr->port_npkts++;
14016 	mutex_exit(&pptr->port_pkt_mutex);
14017 #endif /* DEBUG */
14018 
14019 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14020 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14021 	} else {
14022 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14023 	}
14024 
14025 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14026 
14027 	cmd->cmd_state = FCP_PKT_ISSUED;
14028 
14029 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14030 
14031 #ifdef	DEBUG
14032 	mutex_enter(&pptr->port_pkt_mutex);
14033 	pptr->port_npkts--;
14034 	mutex_exit(&pptr->port_pkt_mutex);
14035 #endif /* DEBUG */
14036 
14037 	cmd->cmd_state = FCP_PKT_IDLE;
14038 
14039 	switch (rval) {
14040 	case FC_SUCCESS:
14041 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14042 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14043 			rval = TRAN_ACCEPT;
14044 		} else {
14045 			rval = TRAN_FATAL_ERROR;
14046 		}
14047 		break;
14048 
14049 	case FC_TRAN_BUSY:
14050 		rval = TRAN_BUSY;
14051 		cmd->cmd_pkt->pkt_resid = 0;
14052 		break;
14053 
14054 	case FC_BADPACKET:
14055 		rval = TRAN_BADPKT;
14056 		break;
14057 
14058 	default:
14059 		rval = TRAN_FATAL_ERROR;
14060 		break;
14061 	}
14062 
14063 	return (rval);
14064 }
14065 
14066 
14067 /*
14068  * called by some of the following transport-called routines to convert
14069  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14070  */
14071 static struct fcp_port *
14072 fcp_dip2port(dev_info_t *dip)
14073 {
14074 	int	instance;
14075 
14076 	instance = ddi_get_instance(dip);
14077 	return (ddi_get_soft_state(fcp_softstate, instance));
14078 }
14079 
14080 
14081 /*
14082  * called internally to return a LUN given a dip
14083  */
14084 struct fcp_lun *
14085 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14086 {
14087 	struct fcp_tgt *ptgt;
14088 	struct fcp_lun *plun;
14089 	int i;
14090 
14091 
14092 	ASSERT(mutex_owned(&pptr->port_mutex));
14093 
14094 	for (i = 0; i < FCP_NUM_HASH; i++) {
14095 		for (ptgt = pptr->port_tgt_hash_table[i];
14096 		    ptgt != NULL;
14097 		    ptgt = ptgt->tgt_next) {
14098 			mutex_enter(&ptgt->tgt_mutex);
14099 			for (plun = ptgt->tgt_lun; plun != NULL;
14100 			    plun = plun->lun_next) {
14101 				mutex_enter(&plun->lun_mutex);
14102 				if (plun->lun_cip == cip) {
14103 					mutex_exit(&plun->lun_mutex);
14104 					mutex_exit(&ptgt->tgt_mutex);
14105 					return (plun); /* match found */
14106 				}
14107 				mutex_exit(&plun->lun_mutex);
14108 			}
14109 			mutex_exit(&ptgt->tgt_mutex);
14110 		}
14111 	}
14112 	return (NULL);				/* no LUN found */
14113 }
14114 
14115 /*
14116  * pass an element to the hotplug list, kick the hotplug thread
14117  * and wait for the element to get processed by the hotplug thread.
14118  * on return the element is freed.
14119  *
14120  * return zero success and non-zero on failure
14121  *
14122  * acquires/releases the target mutex
14123  *
14124  */
14125 static int
14126 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14127     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14128 {
14129 	struct fcp_hp_elem	*elem;
14130 	int			rval;
14131 
14132 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14133 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14134 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14135 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14136 		fcp_log(CE_CONT, pptr->port_dip,
14137 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14138 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14139 		return (NDI_FAILURE);
14140 	}
14141 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14142 	mutex_enter(&elem->mutex);
	while (elem->wait) {
		cv_wait(&elem->cv, &elem->mutex);
	}
14148 	rval = (elem->result);
14149 	mutex_exit(&elem->mutex);
14150 	mutex_destroy(&elem->mutex);
14151 	cv_destroy(&elem->cv);
14152 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14153 	return (rval);
14154 }
14155 
14156 /*
14157  * pass an element to the hotplug list, and then
14158  * kick the hotplug thread
14159  *
14160  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14161  *
14162  * acquires/releases the hotplug mutex
14163  *
14164  * called with the target mutex owned
14165  *
14166  * memory acquired in NOSLEEP mode
14167  * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14168  *	 for the hp daemon to process the request and is responsible for
14169  *	 freeing the element
14170  */
14171 static struct fcp_hp_elem *
14172 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14173     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14174 {
14175 	struct fcp_hp_elem	*elem;
14176 	dev_info_t *pdip;
14177 
14178 	ASSERT(pptr != NULL);
14179 	ASSERT(plun != NULL);
14180 	ASSERT(plun->lun_tgt != NULL);
14181 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14182 
14183 	/* create space for a hotplug element */
14184 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14185 	    == NULL) {
14186 		fcp_log(CE_WARN, NULL,
14187 		    "!can't allocate memory for hotplug element");
14188 		return (NULL);
14189 	}
14190 
14191 	/* fill in hotplug element */
14192 	elem->port = pptr;
14193 	elem->lun = plun;
14194 	elem->cip = cip;
14195 	elem->old_lun_mpxio = plun->lun_mpxio;
14196 	elem->what = what;
14197 	elem->flags = flags;
14198 	elem->link_cnt = link_cnt;
14199 	elem->tgt_cnt = tgt_cnt;
14200 	elem->wait = wait;
14201 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14202 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14203 
14204 	/* schedule the hotplug task */
14205 	pdip = pptr->port_dip;
14206 	mutex_enter(&plun->lun_mutex);
14207 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14208 		plun->lun_event_count++;
14209 		elem->event_cnt = plun->lun_event_count;
14210 	}
14211 	mutex_exit(&plun->lun_mutex);
14212 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14213 	    (void *)elem, KM_NOSLEEP) == NULL) {
14214 		mutex_enter(&plun->lun_mutex);
14215 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14216 			plun->lun_event_count--;
14217 		}
14218 		mutex_exit(&plun->lun_mutex);
14219 		kmem_free(elem, sizeof (*elem));
		return (NULL);
14221 	}
14222 
14223 	return (elem);
14224 }
14225 
14226 
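/*
 * try to re-issue a command taken off the port overflow queue; if the LUN
 * is busy or offline, the port is onlining, or the transport attempt
 * fails, the command is put back on the queue
 */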
14227 static void
14228 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14229 {
14230 	int			rval;
14231 	struct scsi_address	*ap;
14232 	struct fcp_lun	*plun;
14233 	struct fcp_tgt	*ptgt;
14234 	fc_packet_t	*fpkt;
14235 
14236 	ap = &cmd->cmd_pkt->pkt_address;
14237 	plun = ADDR2LUN(ap);
14238 	ptgt = plun->lun_tgt;
14239 
14240 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14241 
14242 	cmd->cmd_state = FCP_PKT_IDLE;
14243 
14244 	mutex_enter(&pptr->port_mutex);
14245 	mutex_enter(&ptgt->tgt_mutex);
14246 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14247 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14248 		fc_ulp_rscn_info_t *rscnp;
14249 
14250 		cmd->cmd_state = FCP_PKT_ISSUED;
14251 
14252 		/*
14253 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14254 		 * originally NULL, hence we try to set it to the pd pointed
14255 		 * to by the SCSI device we're trying to get to.
14256 		 */
14257 
14258 		fpkt = cmd->cmd_fp_pkt;
14259 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14260 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14261 			/*
14262 			 * We need to notify the transport that we now have a
14263 			 * reference to the remote port handle.
14264 			 */
14265 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14266 		}
14267 
14268 		mutex_exit(&ptgt->tgt_mutex);
14269 		mutex_exit(&pptr->port_mutex);
14270 
14271 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14272 
14273 		/* prepare the packet */
14274 
14275 		fcp_prepare_pkt(pptr, cmd, plun);
14276 
14277 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14278 		    pkt_ulp_rscn_infop;
14279 
14280 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14281 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14282 
14283 		if (rscnp != NULL) {
14284 			rscnp->ulp_rscn_count =
14285 			    fc_ulp_get_rscn_count(pptr->
14286 			    port_fp_handle);
14287 		}
14288 
14289 		rval = fcp_transport(pptr->port_fp_handle,
14290 		    cmd->cmd_fp_pkt, 0);
14291 
14292 		if (rval == FC_SUCCESS) {
14293 			return;
14294 		}
14295 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14296 	} else {
14297 		mutex_exit(&ptgt->tgt_mutex);
14298 		mutex_exit(&pptr->port_mutex);
14299 	}
14300 
14301 	fcp_queue_pkt(pptr, cmd);
14302 }
14303 
14304 
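/*
 *     Function: fcp_fail_cmd
 *
 *  Description: Completes a queued command with a failure: the CFLAG_IN_QUEUE
 *		 flag is cleared, the command is marked idle, the packet's
 *		 reason, state and statistics are filled in and the SCSI
 *		 completion callback is posted.
 *
 *     Argument: *cmd		FCP packet to fail.
 *		 reason		Value stored in pkt_reason.
 *		 statistics	Value stored in pkt_statistics.
 *
 * Return Value: None
 */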
14305 static void
14306 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14307 {
14308 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14309 
14310 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14311 	cmd->cmd_state = FCP_PKT_IDLE;
14312 
14313 	cmd->cmd_pkt->pkt_reason = reason;
14314 	cmd->cmd_pkt->pkt_state = 0;
14315 	cmd->cmd_pkt->pkt_statistics = statistics;
14316 
14317 	fcp_post_callback(cmd);
14318 }
14319 
14320 /*
14321  *     Function: fcp_queue_pkt
14322  *
14323  *  Description: This function queues the packet passed by the caller into
14324  *		 the list of packets of the FCP port.
14325  *
14326  *     Argument: *pptr		FCP port.
14327  *		 *cmd		FCP packet to queue.
14328  *
14329  * Return Value: None
14330  *
14331  *	Context: User, Kernel and Interrupt context.
14332  */
14333 static void
14334 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14335 {
	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14337 
14338 	mutex_enter(&pptr->port_pkt_mutex);
14339 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14340 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14341 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14342 
14343 	/*
	 * zero pkt_time means hang around forever
14345 	 */
14346 	if (cmd->cmd_pkt->pkt_time) {
14347 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14348 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14349 		} else {
			/*
			 * Tell the watch thread to fail the command by
			 * setting its timeout to the highest (invalid) value
			 */
14354 			cmd->cmd_timeout = fcp_watchdog_time;
14355 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14356 		}
14357 	}
14358 
14359 	if (pptr->port_pkt_head) {
14360 		ASSERT(pptr->port_pkt_tail != NULL);
14361 
14362 		pptr->port_pkt_tail->cmd_next = cmd;
14363 		pptr->port_pkt_tail = cmd;
14364 	} else {
14365 		ASSERT(pptr->port_pkt_tail == NULL);
14366 
14367 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14368 	}
14369 	cmd->cmd_next = NULL;
14370 	mutex_exit(&pptr->port_pkt_mutex);
14371 }
14372 
14373 /*
14374  *     Function: fcp_update_targets
14375  *
14376  *  Description: This function applies the specified change of state to all
14377  *		 the targets listed.  The operation applied is 'set'.
14378  *
14379  *     Argument: *pptr		FCP port.
14380  *		 *dev_list	Array of fc_portmap_t structures.
14381  *		 count		Length of dev_list.
14382  *		 state		State bits to update.
14383  *		 cause		Reason for the update.
14384  *
14385  * Return Value: None
14386  *
14387  *	Context: User, Kernel and Interrupt context.
14388  *		 The mutex pptr->port_mutex must be held.
14389  */
14390 static void
14391 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14392     uint32_t count, uint32_t state, int cause)
14393 {
14394 	fc_portmap_t		*map_entry;
14395 	struct fcp_tgt	*ptgt;
14396 
14397 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14398 
14399 	while (count--) {
14400 		map_entry = &(dev_list[count]);
14401 		ptgt = fcp_lookup_target(pptr,
14402 		    (uchar_t *)&(map_entry->map_pwwn));
14403 		if (ptgt == NULL) {
14404 			continue;
14405 		}
14406 
14407 		mutex_enter(&ptgt->tgt_mutex);
14408 		ptgt->tgt_trace = 0;
14409 		ptgt->tgt_change_cnt++;
14410 		ptgt->tgt_statec_cause = cause;
14411 		ptgt->tgt_tmp_cnt = 1;
14412 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14413 		mutex_exit(&ptgt->tgt_mutex);
14414 	}
14415 }
14416 
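/*
 *     Function: fcp_call_finish_init
 *
 *  Description: Wrapper around fcp_call_finish_init_held() that acquires and
 *		 releases pptr->port_mutex around the call.
 */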
14417 static int
14418 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14419     int lcount, int tcount, int cause)
14420 {
14421 	int rval;
14422 
14423 	mutex_enter(&pptr->port_mutex);
14424 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14425 	mutex_exit(&pptr->port_mutex);
14426 
14427 	return (rval);
14428 }
14429 
14430 
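/*
 *     Function: fcp_call_finish_init_held
 *
 *  Description: Decrements the per-target and per-port temporary counters
 *		 that track outstanding discovery work and, when the last
 *		 piece of work for a link or target change completes, calls
 *		 fcp_finish_tgt() and/or fcp_finish_init().
 *
 * Return Value: FCP_NO_CHANGE or FCP_DEV_CHANGE.
 *
 *	Context: The mutex pptr->port_mutex must be held.
 */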
14431 static int
14432 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14433     int lcount, int tcount, int cause)
14434 {
14435 	int	finish_init = 0;
14436 	int	finish_tgt = 0;
14437 	int	do_finish_init = 0;
14438 	int	rval = FCP_NO_CHANGE;
14439 
14440 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14441 	    cause == FCP_CAUSE_LINK_DOWN) {
14442 		do_finish_init = 1;
14443 	}
14444 
14445 	if (ptgt != NULL) {
14446 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14447 		    FCP_BUF_LEVEL_2, 0,
14448 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14449 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14450 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14451 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14452 		    ptgt->tgt_d_id, ptgt->tgt_done);
14453 
14454 		mutex_enter(&ptgt->tgt_mutex);
14455 
14456 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14457 			rval = FCP_DEV_CHANGE;
14458 			if (do_finish_init && ptgt->tgt_done == 0) {
14459 				ptgt->tgt_done++;
14460 				finish_init = 1;
14461 			}
14462 		} else {
14463 			if (--ptgt->tgt_tmp_cnt <= 0) {
14464 				ptgt->tgt_tmp_cnt = 0;
14465 				finish_tgt = 1;
14466 
14467 				if (do_finish_init) {
14468 					finish_init = 1;
14469 				}
14470 			}
14471 		}
14472 		mutex_exit(&ptgt->tgt_mutex);
14473 	} else {
14474 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14475 		    FCP_BUF_LEVEL_2, 0,
14476 		    "Call Finish Init for NO target");
14477 
14478 		if (do_finish_init) {
14479 			finish_init = 1;
14480 		}
14481 	}
14482 
14483 	if (finish_tgt) {
14484 		ASSERT(ptgt != NULL);
14485 
14486 		mutex_enter(&ptgt->tgt_mutex);
14487 #ifdef	DEBUG
14488 		bzero(ptgt->tgt_tmp_cnt_stack,
14489 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14490 
14491 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14492 		    FCP_STACK_DEPTH);
14493 #endif /* DEBUG */
14494 		mutex_exit(&ptgt->tgt_mutex);
14495 
14496 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14497 	}
14498 
14499 	if (finish_init && lcount == pptr->port_link_cnt) {
14500 		ASSERT(pptr->port_tmp_cnt > 0);
14501 		if (--pptr->port_tmp_cnt == 0) {
14502 			fcp_finish_init(pptr);
14503 		}
14504 	} else if (lcount != pptr->port_link_cnt) {
14505 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14506 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occurred"
14508 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14509 	}
14510 
14511 	return (rval);
14512 }
14513 
14514 
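/*
 *     Function: fcp_reconfigure_luns
 *
 *  Description: Timeout callback that builds a single entry portmap for the
 *		 target and feeds it back into fcp_statec_callback() as a
 *		 device change, which forces the LUNs of that target to be
 *		 rediscovered.
 *
 *     Argument: tgt_handle	Pointer to the fcp_tgt to reconfigure.
 *
 * Return Value: None
 */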
14515 static void
14516 fcp_reconfigure_luns(void * tgt_handle)
14517 {
14518 	uint32_t		dev_cnt;
14519 	fc_portmap_t		*devlist;
14520 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14521 	struct fcp_port		*pptr = ptgt->tgt_port;
14522 
14523 	/*
14524 	 * If the timer that fires this off got canceled too late, the
14525 	 * target could have been destroyed.
14526 	 */
14527 
14528 	if (ptgt->tgt_tid == NULL) {
14529 		return;
14530 	}
14531 
14532 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14533 	if (devlist == NULL) {
14534 		fcp_log(CE_WARN, pptr->port_dip,
14535 		    "!fcp%d: failed to allocate for portmap",
14536 		    pptr->port_instance);
14537 		return;
14538 	}
14539 
14540 	dev_cnt = 1;
14541 	devlist->map_pd = ptgt->tgt_pd_handle;
14542 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14543 	devlist->map_did.port_id = ptgt->tgt_d_id;
14544 
14545 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14546 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14547 
14548 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14549 	devlist->map_type = PORT_DEVICE_NEW;
14550 	devlist->map_flags = 0;
14551 
14552 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14553 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14554 
	/*
	 * Clear tgt_tid now that there are no more references
	 * to the fcp_tgt.
	 */
14559 	mutex_enter(&ptgt->tgt_mutex);
14560 	ptgt->tgt_tid = NULL;
14561 	mutex_exit(&ptgt->tgt_mutex);
14562 
14563 	kmem_free(devlist, sizeof (*devlist));
14564 }
14565 
14566 
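/*
 *     Function: fcp_free_targets
 *
 *  Description: Walks the target hash table of the port and frees every
 *		 target (and its LUNs) by calling fcp_free_target().
 */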
14567 static void
14568 fcp_free_targets(struct fcp_port *pptr)
14569 {
14570 	int			i;
14571 	struct fcp_tgt	*ptgt;
14572 
14573 	mutex_enter(&pptr->port_mutex);
14574 	for (i = 0; i < FCP_NUM_HASH; i++) {
14575 		ptgt = pptr->port_tgt_hash_table[i];
14576 		while (ptgt != NULL) {
14577 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14578 
14579 			fcp_free_target(ptgt);
14580 			ptgt = next_tgt;
14581 		}
14582 	}
14583 	mutex_exit(&pptr->port_mutex);
14584 }
14585 
14586 
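/*
 *     Function: fcp_free_target
 *
 *  Description: Cancels any pending reconfiguration timeout for the target,
 *		 deallocates all of its LUNs and then the target itself.
 */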
14587 static void
14588 fcp_free_target(struct fcp_tgt *ptgt)
14589 {
14590 	struct fcp_lun	*plun;
14591 	timeout_id_t		tid;
14592 
14593 	mutex_enter(&ptgt->tgt_mutex);
14594 	tid = ptgt->tgt_tid;
14595 
14596 	/*
14597 	 * Cancel any pending timeouts for this target.
14598 	 */
14599 
14600 	if (tid != NULL) {
14601 		/*
14602 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14603 		 * If tgt_tid is NULL, the callback will simply return.
14604 		 */
14605 		ptgt->tgt_tid = NULL;
14606 		mutex_exit(&ptgt->tgt_mutex);
14607 		(void) untimeout(tid);
14608 		mutex_enter(&ptgt->tgt_mutex);
14609 	}
14610 
14611 	plun = ptgt->tgt_lun;
14612 	while (plun != NULL) {
14613 		struct fcp_lun *next_lun = plun->lun_next;
14614 
14615 		fcp_dealloc_lun(plun);
14616 		plun = next_lun;
14617 	}
14618 
14619 	mutex_exit(&ptgt->tgt_mutex);
14620 	fcp_dealloc_tgt(ptgt);
14621 }
14622 
14623 /*
14624  *     Function: fcp_is_retryable
14625  *
14626  *  Description: Indicates if the internal packet is retryable.
14627  *
14628  *     Argument: *icmd		FCP internal packet.
14629  *
14630  * Return Value: 0	Not retryable
14631  *		 1	Retryable
14632  *
14633  *	Context: User, Kernel and Interrupt context
14634  */
14635 static int
14636 fcp_is_retryable(struct fcp_ipkt *icmd)
14637 {
14638 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14639 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14640 		return (0);
14641 	}
14642 
14643 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14644 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14645 }
14646 
14647 /*
14648  *     Function: fcp_create_on_demand
14649  *
14650  *     Argument: *pptr		FCP port.
14651  *		 *pwwn		Port WWN.
14652  *
14653  * Return Value: 0	Success
14654  *		 EIO
14655  *		 ENOMEM
14656  *		 EBUSY
14657  *		 EINVAL
14658  *
14659  *	Context: User and Kernel context
14660  */
14661 static int
14662 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14663 {
14664 	int			wait_ms;
14665 	int			tcount;
14666 	int			lcount;
14667 	int			ret;
14668 	int			error;
14669 	int			rval = EIO;
14670 	int			ntries;
14671 	fc_portmap_t		*devlist;
14672 	opaque_t		pd;
14673 	struct fcp_lun		*plun;
14674 	struct fcp_tgt		*ptgt;
14675 	int			old_manual = 0;
14676 
14677 	/* Allocates the fc_portmap_t structure. */
14678 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14679 
14680 	/*
	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to initialize
	 * the RSCN count as shown in the commented statement below:
14683 	 *
14684 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14685 	 *
14686 	 * Below, the deadline for the discovery process is set.
14687 	 */
14688 	mutex_enter(&pptr->port_mutex);
14689 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14690 	mutex_exit(&pptr->port_mutex);
14691 
14692 	/*
14693 	 * We try to find the remote port based on the WWN provided by the
14694 	 * caller.  We actually ask fp/fctl if it has it.
14695 	 */
14696 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14697 	    (la_wwn_t *)pwwn, &error, 1);
14698 
14699 	if (pd == NULL) {
14700 		kmem_free(devlist, sizeof (*devlist));
14701 		return (rval);
14702 	}
14703 
14704 	/*
14705 	 * The remote port was found.  We ask fp/fctl to update our
14706 	 * fc_portmap_t structure.
14707 	 */
14708 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14709 	    (la_wwn_t *)pwwn, devlist);
14710 	if (ret != FC_SUCCESS) {
14711 		kmem_free(devlist, sizeof (*devlist));
14712 		return (rval);
14713 	}
14714 
14715 	/*
	 * The map type field is set to indicate that the creation is being
	 * done at the user's request (ioctl, probably via luxadm or cfgadm).
14718 	 */
14719 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14720 
14721 	mutex_enter(&pptr->port_mutex);
14722 
14723 	/*
14724 	 * We check to see if fcp already has a target that describes the
	 * device being created.  If not, it is created.
14726 	 */
14727 	ptgt = fcp_lookup_target(pptr, pwwn);
14728 	if (ptgt == NULL) {
14729 		lcount = pptr->port_link_cnt;
14730 		mutex_exit(&pptr->port_mutex);
14731 
14732 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14733 		if (ptgt == NULL) {
14734 			fcp_log(CE_WARN, pptr->port_dip,
14735 			    "!FC target allocation failed");
14736 			return (ENOMEM);
14737 		}
14738 
14739 		mutex_enter(&pptr->port_mutex);
14740 	}
14741 
14742 	mutex_enter(&ptgt->tgt_mutex);
14743 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14744 	ptgt->tgt_tmp_cnt = 1;
14745 	ptgt->tgt_device_created = 0;
14746 	/*
	 * If this is a fabric topology and auto configuration is enabled,
	 * but the target was manually unconfigured, then reset
	 * manual_config_only to 0 so the device will get configured.
14750 	 */
14751 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14752 	    fcp_enable_auto_configuration &&
14753 	    ptgt->tgt_manual_config_only == 1) {
14754 		old_manual = 1;
14755 		ptgt->tgt_manual_config_only = 0;
14756 	}
14757 	mutex_exit(&ptgt->tgt_mutex);
14758 
14759 	fcp_update_targets(pptr, devlist, 1,
14760 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14761 
14762 	lcount = pptr->port_link_cnt;
14763 	tcount = ptgt->tgt_change_cnt;
14764 
14765 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14766 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14767 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14768 		    fcp_enable_auto_configuration && old_manual) {
14769 			mutex_enter(&ptgt->tgt_mutex);
14770 			ptgt->tgt_manual_config_only = 1;
14771 			mutex_exit(&ptgt->tgt_mutex);
14772 		}
14773 
14774 		if (pptr->port_link_cnt != lcount ||
14775 		    ptgt->tgt_change_cnt != tcount) {
14776 			rval = EBUSY;
14777 		}
14778 		mutex_exit(&pptr->port_mutex);
14779 
14780 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14781 		    FCP_BUF_LEVEL_3, 0,
14782 		    "fcp_create_on_demand: mapflags ptgt=%x, "
14783 		    "lcount=%x::port_link_cnt=%x, "
14784 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
14785 		    ptgt, lcount, pptr->port_link_cnt,
14786 		    tcount, ptgt->tgt_change_cnt, rval);
14787 		return (rval);
14788 	}
14789 
14790 	/*
	 * Due to the lack of synchronization mechanisms, we perform
	 * periodic monitoring of our request; because requests get
	 * dropped when another one supersedes them (either because of
	 * a link change or a target change), it is difficult to provide
	 * a clean synchronization mechanism (such as a semaphore or a
	 * condition variable) without exhaustively rewriting the
	 * mainline discovery code of this driver.
14798 	 */
14799 	wait_ms = 500;
14800 
14801 	ntries = fcp_max_target_retries;
14802 
14803 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14804 	    FCP_BUF_LEVEL_3, 0,
14805 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
14806 	    "lcount=%x::port_link_cnt=%x, "
14807 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14808 	    "tgt_tmp_cnt =%x",
14809 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14810 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14811 	    ptgt->tgt_tmp_cnt);
14812 
14813 	mutex_enter(&ptgt->tgt_mutex);
14814 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
14815 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
14816 		mutex_exit(&ptgt->tgt_mutex);
14817 		mutex_exit(&pptr->port_mutex);
14818 
14819 		delay(drv_usectohz(wait_ms * 1000));
14820 
14821 		mutex_enter(&pptr->port_mutex);
14822 		mutex_enter(&ptgt->tgt_mutex);
14823 	}
14824 
14825 
14826 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
14827 		rval = EBUSY;
14828 	} else {
14829 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
14830 		    FCP_TGT_NODE_PRESENT) {
14831 			rval = 0;
14832 		}
14833 	}
14834 
14835 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14836 	    FCP_BUF_LEVEL_3, 0,
14837 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
14838 	    "lcount=%x::port_link_cnt=%x, "
14839 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14840 	    "tgt_tmp_cnt =%x",
14841 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14842 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14843 	    ptgt->tgt_tmp_cnt);
14844 
14845 	if (rval) {
14846 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14847 		    fcp_enable_auto_configuration && old_manual) {
14848 			ptgt->tgt_manual_config_only = 1;
14849 		}
14850 		mutex_exit(&ptgt->tgt_mutex);
14851 		mutex_exit(&pptr->port_mutex);
14852 		kmem_free(devlist, sizeof (*devlist));
14853 
14854 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14855 		    FCP_BUF_LEVEL_3, 0,
14856 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
14857 		    "lcount=%x::port_link_cnt=%x, "
14858 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
14859 		    "tgt_device_created=%x, tgt D_ID=%x",
14860 		    ntries, ptgt, lcount, pptr->port_link_cnt,
14861 		    tcount, ptgt->tgt_change_cnt, rval,
14862 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
14863 		return (rval);
14864 	}
14865 
14866 	if ((plun = ptgt->tgt_lun) != NULL) {
14867 		tcount = plun->lun_tgt->tgt_change_cnt;
14868 	} else {
14869 		rval = EINVAL;
14870 	}
14871 	lcount = pptr->port_link_cnt;
14872 
14873 	/*
14874 	 * Configuring the target with no LUNs will fail. We
14875 	 * should reset the node state so that it is not
14876 	 * automatically configured when the LUNs are added
14877 	 * to this target.
14878 	 */
14879 	if (ptgt->tgt_lun_cnt == 0) {
14880 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
14881 	}
14882 	mutex_exit(&ptgt->tgt_mutex);
14883 	mutex_exit(&pptr->port_mutex);
14884 
14885 	while (plun) {
14886 		child_info_t	*cip;
14887 
14888 		mutex_enter(&plun->lun_mutex);
14889 		cip = plun->lun_cip;
14890 		mutex_exit(&plun->lun_mutex);
14891 
14892 		mutex_enter(&ptgt->tgt_mutex);
14893 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
14894 			mutex_exit(&ptgt->tgt_mutex);
14895 
14896 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
14897 			    FCP_ONLINE, lcount, tcount,
14898 			    NDI_ONLINE_ATTACH);
14899 			if (rval != NDI_SUCCESS) {
14900 				FCP_TRACE(fcp_logq,
14901 				    pptr->port_instbuf, fcp_trace,
14902 				    FCP_BUF_LEVEL_3, 0,
14903 				    "fcp_create_on_demand: "
14904 				    "pass_to_hp_and_wait failed "
14905 				    "rval=%x", rval);
14906 				rval = EIO;
14907 			} else {
14908 				mutex_enter(&LUN_TGT->tgt_mutex);
14909 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
14910 				    FCP_LUN_BUSY);
14911 				mutex_exit(&LUN_TGT->tgt_mutex);
14912 			}
14913 			mutex_enter(&ptgt->tgt_mutex);
14914 		}
14915 
14916 		plun = plun->lun_next;
14917 		mutex_exit(&ptgt->tgt_mutex);
14918 	}
14919 
14920 	kmem_free(devlist, sizeof (*devlist));
14921 
14922 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14923 	    fcp_enable_auto_configuration && old_manual) {
14924 		mutex_enter(&ptgt->tgt_mutex);
14925 		/* if successful then set manual to 0 */
14926 		if (rval == 0) {
14927 			ptgt->tgt_manual_config_only = 0;
14928 		} else {
14929 			/* reset to 1 so the user has to do the config */
14930 			ptgt->tgt_manual_config_only = 1;
14931 		}
14932 		mutex_exit(&ptgt->tgt_mutex);
14933 	}
14934 
14935 	return (rval);
14936 }
14937 
14938 
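/*
 *     Function: fcp_ascii_to_wwn
 *
 *  Description: Converts an ASCII string of hexadecimal digits into the
 *		 corresponding WWN bytes, stopping after byte_len bytes.
 */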
14939 static void
14940 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
14941 {
14942 	int		count;
14943 	uchar_t		byte;
14944 
14945 	count = 0;
14946 	while (*string) {
14947 		byte = FCP_ATOB(*string); string++;
14948 		byte = byte << 4 | FCP_ATOB(*string); string++;
14949 		bytes[count++] = byte;
14950 
14951 		if (count >= byte_len) {
14952 			break;
14953 		}
14954 	}
14955 }
14956 
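/*
 *     Function: fcp_wwn_to_ascii
 *
 *  Description: Formats a WWN as a string of FC_WWN_SIZE * 2 lowercase
 *		 hexadecimal characters in the buffer supplied by the caller.
 */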
14957 static void
14958 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
14959 {
14960 	int		i;
14961 
14962 	for (i = 0; i < FC_WWN_SIZE; i++) {
14963 		(void) sprintf(string + (i * 2),
14964 		    "%02x", wwn[i]);
14965 	}
14966 
14967 }
14968 
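/*
 *     Function: fcp_print_error
 *
 *  Description: Logs a human readable description of why an internal packet
 *		 (SCSI command or ELS) failed, decoding the FCP response and
 *		 sense data when they are available.
 */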
14969 static void
14970 fcp_print_error(fc_packet_t *fpkt)
14971 {
14972 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
14973 	    fpkt->pkt_ulp_private;
14974 	struct fcp_port	*pptr;
14975 	struct fcp_tgt	*ptgt;
14976 	struct fcp_lun	*plun;
14977 	caddr_t			buf;
14978 	int			scsi_cmd = 0;
14979 
14980 	ptgt = icmd->ipkt_tgt;
14981 	plun = icmd->ipkt_lun;
14982 	pptr = ptgt->tgt_port;
14983 
14984 	buf = kmem_zalloc(256, KM_NOSLEEP);
14985 	if (buf == NULL) {
14986 		return;
14987 	}
14988 
14989 	switch (icmd->ipkt_opcode) {
14990 	case SCMD_REPORT_LUN:
14991 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
14992 		    " lun=0x%%x failed");
14993 		scsi_cmd++;
14994 		break;
14995 
14996 	case SCMD_INQUIRY_PAGE83:
14997 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
14998 		    " lun=0x%%x failed");
14999 		scsi_cmd++;
15000 		break;
15001 
15002 	case SCMD_INQUIRY:
15003 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15004 		    " lun=0x%%x failed");
15005 		scsi_cmd++;
15006 		break;
15007 
15008 	case LA_ELS_PLOGI:
15009 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15010 		break;
15011 
15012 	case LA_ELS_PRLI:
15013 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15014 		break;
15015 	}
15016 
15017 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15018 		struct fcp_rsp		response, *rsp;
15019 		uchar_t			asc, ascq;
15020 		caddr_t			sense_key = NULL;
15021 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15022 
15023 		if (icmd->ipkt_nodma) {
15024 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15025 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15026 			    sizeof (struct fcp_rsp));
15027 		} else {
15028 			rsp = &response;
15029 			bep = &fcp_rsp_err;
15030 
15031 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15032 			    sizeof (struct fcp_rsp));
15033 
15034 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15035 			    bep, fpkt->pkt_resp_acc,
15036 			    sizeof (struct fcp_rsp_info));
15037 		}
15038 
15039 
15040 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15041 			(void) sprintf(buf + strlen(buf),
15042 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15043 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15044 			    " senselen=%%x. Giving up");
15045 
15046 			fcp_log(CE_WARN, pptr->port_dip, buf,
15047 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15048 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15049 			    rsp->fcp_u.fcp_status.reserved_1,
15050 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15051 
15052 			kmem_free(buf, 256);
15053 			return;
15054 		}
15055 
15056 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15057 		    bep->rsp_code != FCP_NO_FAILURE) {
15058 			(void) sprintf(buf + strlen(buf),
15059 			    " FCP Response code = 0x%x", bep->rsp_code);
15060 		}
15061 
15062 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15063 			struct scsi_extended_sense sense_info, *sense_ptr;
15064 
15065 			if (icmd->ipkt_nodma) {
15066 				sense_ptr = (struct scsi_extended_sense *)
15067 				    ((caddr_t)fpkt->pkt_resp +
15068 				    sizeof (struct fcp_rsp) +
15069 				    rsp->fcp_response_len);
15070 			} else {
15071 				sense_ptr = &sense_info;
15072 
15073 				FCP_CP_IN(fpkt->pkt_resp +
15074 				    sizeof (struct fcp_rsp) +
15075 				    rsp->fcp_response_len, &sense_info,
15076 				    fpkt->pkt_resp_acc,
15077 				    sizeof (struct scsi_extended_sense));
15078 			}
15079 
15080 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15081 			    NUM_IMPL_SENSE_KEYS) {
15082 				sense_key = sense_keys[sense_ptr->es_key];
15083 			} else {
15084 				sense_key = "Undefined";
15085 			}
15086 
15087 			asc = sense_ptr->es_add_code;
15088 			ascq = sense_ptr->es_qual_code;
15089 
15090 			(void) sprintf(buf + strlen(buf),
15091 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15092 			    " Giving up");
15093 
15094 			fcp_log(CE_WARN, pptr->port_dip, buf,
15095 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15096 			    asc, ascq);
15097 		} else {
15098 			(void) sprintf(buf + strlen(buf),
15099 			    " : SCSI status=%%x. Giving up");
15100 
15101 			fcp_log(CE_WARN, pptr->port_dip, buf,
15102 			    ptgt->tgt_d_id, plun->lun_num,
15103 			    rsp->fcp_u.fcp_status.scsi_status);
15104 		}
15105 	} else {
15106 		caddr_t state, reason, action, expln;
15107 
15108 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15109 		    &action, &expln);
15110 
15111 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15112 		    " Reason:%%s. Giving up");
15113 
15114 		if (scsi_cmd) {
15115 			fcp_log(CE_WARN, pptr->port_dip, buf,
15116 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15117 		} else {
15118 			fcp_log(CE_WARN, pptr->port_dip, buf,
15119 			    ptgt->tgt_d_id, state, reason);
15120 		}
15121 	}
15122 
15123 	kmem_free(buf, 256);
15124 }
15125 
15126 
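/*
 *     Function: fcp_handle_ipkt_errors
 *
 *  Description: Decides what to do with an internal packet rejected by the
 *		 transport: requeue it for retry on the various busy
 *		 conditions, turn a PRLI that failed with FC_LOGINREQ into a
 *		 PLOGI, or log the failure.
 *
 * Return Value: DDI_SUCCESS	The packet was requeued or reissued.
 *		 DDI_FAILURE	The error could not be handled here.
 */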
15127 static int
15128 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15129     struct fcp_ipkt *icmd, int rval, caddr_t op)
15130 {
15131 	int	ret = DDI_FAILURE;
15132 	char	*error;
15133 
15134 	switch (rval) {
15135 	case FC_DEVICE_BUSY_NEW_RSCN:
15136 		/*
15137 		 * This means that there was a new RSCN that the transport
15138 		 * knows about (which the ULP *may* know about too) but the
15139 		 * pkt that was sent down was related to an older RSCN. So, we
15140 		 * are just going to reset the retry count and deadline and
15141 		 * continue to retry. The idea is that transport is currently
15142 		 * working on the new RSCN and will soon let the ULPs know
15143 		 * about it and when it does the existing logic will kick in
15144 		 * where it will change the tcount to indicate that something
15145 		 * changed on the target. So, rediscovery will start and there
15146 		 * will not be an infinite retry.
15147 		 *
15148 		 * For a full flow of how the RSCN info is transferred back and
15149 		 * forth, see fp.c
15150 		 */
15151 		icmd->ipkt_retries = 0;
15152 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15153 		    FCP_ICMD_DEADLINE;
15154 
15155 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15156 		    FCP_BUF_LEVEL_3, 0,
15157 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15158 		    rval, ptgt->tgt_d_id);
15159 		/* FALLTHROUGH */
15160 
15161 	case FC_STATEC_BUSY:
15162 	case FC_DEVICE_BUSY:
15163 	case FC_PBUSY:
15164 	case FC_FBUSY:
15165 	case FC_TRAN_BUSY:
15166 	case FC_OFFLINE:
15167 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15168 		    FCP_BUF_LEVEL_3, 0,
15169 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15170 		    rval, ptgt->tgt_d_id);
15171 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15172 		    fcp_is_retryable(icmd)) {
15173 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15174 			ret = DDI_SUCCESS;
15175 		}
15176 		break;
15177 
15178 	case FC_LOGINREQ:
15179 		/*
15180 		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handle a PRLI that fails
		 * with FC_LOGINREQ differently from other ipkts that fail
15183 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15184 		 * a simple matter to turn it into a PLOGI instead, so that's
15185 		 * exactly what we do here.
15186 		 */
15187 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15188 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15189 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15190 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15191 		} else {
15192 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15193 			    FCP_BUF_LEVEL_3, 0,
15194 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15195 			    rval, ptgt->tgt_d_id);
15196 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15197 			    fcp_is_retryable(icmd)) {
15198 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15199 				ret = DDI_SUCCESS;
15200 			}
15201 		}
15202 		break;
15203 
15204 	default:
15205 		mutex_enter(&pptr->port_mutex);
15206 		mutex_enter(&ptgt->tgt_mutex);
15207 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15208 			mutex_exit(&ptgt->tgt_mutex);
15209 			mutex_exit(&pptr->port_mutex);
15210 
15211 			(void) fc_ulp_error(rval, &error);
15212 			fcp_log(CE_WARN, pptr->port_dip,
15213 			    "!Failed to send %s to D_ID=%x error=%s",
15214 			    op, ptgt->tgt_d_id, error);
15215 		} else {
15216 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15217 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occurred"
15219 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15220 			mutex_exit(&ptgt->tgt_mutex);
15221 			mutex_exit(&pptr->port_mutex);
15222 		}
15223 		break;
15224 	}
15225 
15226 	return (ret);
15227 }
15228 
15229 
15230 /*
 * Check for outstanding commands on any LUN of this target
15232  */
15233 static int
15234 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15235 {
15236 	struct	fcp_lun	*plun;
15237 	struct	fcp_pkt	*cmd;
15238 
15239 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15240 		mutex_enter(&plun->lun_mutex);
15241 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15242 		    cmd = cmd->cmd_forw) {
15243 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15244 				mutex_exit(&plun->lun_mutex);
15245 				return (FC_SUCCESS);
15246 			}
15247 		}
15248 		mutex_exit(&plun->lun_mutex);
15249 	}
15250 
15251 	return (FC_FAILURE);
15252 }
15253 
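/*
 *     Function: fcp_construct_map
 *
 *  Description: Builds a portmap array describing every non-orphan target
 *		 known to the port.  Entries that fp/fctl cannot fill in are
 *		 marked PORT_DEVICE_INVALID/PORT_DEVICE_OLD.
 *
 * Return Value: Pointer to the allocated array (freed by the caller), or
 *		 NULL on allocation failure.  *dev_cnt is set to the number
 *		 of entries.
 */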
15254 static fc_portmap_t *
15255 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15256 {
15257 	int			i;
15258 	fc_portmap_t		*devlist;
15259 	fc_portmap_t		*devptr = NULL;
15260 	struct fcp_tgt	*ptgt;
15261 
15262 	mutex_enter(&pptr->port_mutex);
15263 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15264 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15265 		    ptgt = ptgt->tgt_next) {
15266 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15267 				++*dev_cnt;
15268 			}
15269 		}
15270 	}
15271 
15272 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15273 	    KM_NOSLEEP);
15274 	if (devlist == NULL) {
15275 		mutex_exit(&pptr->port_mutex);
15276 		fcp_log(CE_WARN, pptr->port_dip,
15277 		    "!fcp%d: failed to allocate for portmap for construct map",
15278 		    pptr->port_instance);
15279 		return (devptr);
15280 	}
15281 
15282 	for (i = 0; i < FCP_NUM_HASH; i++) {
15283 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15284 		    ptgt = ptgt->tgt_next) {
15285 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15286 				int ret;
15287 
15288 				ret = fc_ulp_pwwn_to_portmap(
15289 				    pptr->port_fp_handle,
15290 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15291 				    devlist);
15292 
15293 				if (ret == FC_SUCCESS) {
15294 					devlist++;
15295 					continue;
15296 				}
15297 
15298 				devlist->map_pd = NULL;
15299 				devlist->map_did.port_id = ptgt->tgt_d_id;
15300 				devlist->map_hard_addr.hard_addr =
15301 				    ptgt->tgt_hard_addr;
15302 
15303 				devlist->map_state = PORT_DEVICE_INVALID;
15304 				devlist->map_type = PORT_DEVICE_OLD;
15305 
15306 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15307 				    &devlist->map_nwwn, FC_WWN_SIZE);
15308 
15309 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15310 				    &devlist->map_pwwn, FC_WWN_SIZE);
15311 
15312 				devlist++;
15313 			}
15314 		}
15315 	}
15316 
15317 	mutex_exit(&pptr->port_mutex);
15318 
15319 	return (devptr);
15320 }

/*
 * Inform MPxIO that the LUN is busy and cannot accept regular I/O
15323  */
15324 static void
15325 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15326 {
15327 	int i;
15328 	struct fcp_tgt	*ptgt;
15329 	struct fcp_lun	*plun;
15330 
15331 	for (i = 0; i < FCP_NUM_HASH; i++) {
15332 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15333 		    ptgt = ptgt->tgt_next) {
15334 			mutex_enter(&ptgt->tgt_mutex);
15335 			for (plun = ptgt->tgt_lun; plun != NULL;
15336 			    plun = plun->lun_next) {
15337 				if (plun->lun_mpxio &&
15338 				    plun->lun_state & FCP_LUN_BUSY) {
15339 					if (!fcp_pass_to_hp(pptr, plun,
15340 					    plun->lun_cip,
15341 					    FCP_MPXIO_PATH_SET_BUSY,
15342 					    pptr->port_link_cnt,
15343 					    ptgt->tgt_change_cnt, 0, 0)) {
15344 						FCP_TRACE(fcp_logq,
15345 						    pptr->port_instbuf,
15346 						    fcp_trace,
15347 						    FCP_BUF_LEVEL_2, 0,
15348 						    "path_verifybusy: "
15349 						    "disable lun %p failed!",
15350 						    plun);
15351 					}
15352 				}
15353 			}
15354 			mutex_exit(&ptgt->tgt_mutex);
15355 		}
15356 	}
15357 }
15358 
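/*
 *     Function: fcp_update_mpxio_path
 *
 *  Description: Enables or disables the MPxIO path of the LUN depending on
 *		 whether 'what' is FCP_MPXIO_PATH_CLEAR_BUSY or
 *		 FCP_MPXIO_PATH_SET_BUSY.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 */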
15359 static int
15360 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15361 {
15362 	dev_info_t		*cdip = NULL;
15363 	dev_info_t		*pdip = NULL;
15364 
15365 	ASSERT(plun);
15366 
15367 	mutex_enter(&plun->lun_mutex);
15368 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15369 		mutex_exit(&plun->lun_mutex);
15370 		return (NDI_FAILURE);
15371 	}
15372 	mutex_exit(&plun->lun_mutex);
15373 	cdip = mdi_pi_get_client(PIP(cip));
15374 	pdip = mdi_pi_get_phci(PIP(cip));
15375 
15376 	ASSERT(cdip != NULL);
15377 	ASSERT(pdip != NULL);
15378 
15379 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15380 		/* LUN ready for IO */
15381 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15382 	} else {
15383 		/* LUN busy to accept IO */
15384 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15385 	}
15386 	return (NDI_SUCCESS);
15387 }
15388 
15389 /*
 * The caller must free the returned string, which is MAXPATHLEN bytes long.
 * If the device is offline (instance number of -1), NULL is returned.
15393  */
15394 static char *
fcp_get_lun_path(struct fcp_lun *plun)
{
	dev_info_t	*dip = NULL;
	char		*path = NULL;

	if (plun == NULL) {
15399 		return (NULL);
15400 	}
15401 	if (plun->lun_mpxio == 0) {
15402 		dip = DIP(plun->lun_cip);
15403 	} else {
15404 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15405 	}
15406 	if (dip == NULL) {
15407 		return (NULL);
15408 	}
15409 	if (ddi_get_instance(dip) < 0) {
15410 		return (NULL);
15411 	}
15412 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15413 	if (path == NULL) {
15414 		return (NULL);
15415 	}
15416 
15417 	(void) ddi_pathname(dip, path);
15418 	/*
15419 	 * In reality, the user wants a fully valid path (one they can open)
15420 	 * but this string is lacking the mount point, and the minor node.
15421 	 * It would be nice if we could "figure these out" somehow
15422 	 * and fill them in.  Otherwise, the userland code has to understand
15423 	 * driver specific details of which minor node is the "best" or
15424 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15425 	 * which tape doesn't rewind)
15426 	 */
15427 	return (path);
15428 }
15429 
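/*
 *     Function: fcp_scsi_bus_config
 *
 *  Description: bus_config entry point.  BUS_CONFIG_ONE is retried (and the
 *		 taskq drained) so that fabric devices needed early in boot
 *		 have time to show up; BUS_CONFIG_DRIVER and BUS_CONFIG_ALL
 *		 wait until all devices have reported in or
 *		 FCP_INIT_WAIT_TIMEOUT expires before configuring children.
 */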
15430 static int
15431 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15432     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15433 {
15434 	int64_t reset_delay;
15435 	int rval, retry = 0;
15436 	struct fcp_port *pptr = fcp_dip2port(parent);
15437 
15438 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15439 	    (lbolt64 - pptr->port_attach_time);
15440 	if (reset_delay < 0) {
15441 		reset_delay = 0;
15442 	}
15443 
15444 	if (fcp_bus_config_debug) {
15445 		flag |= NDI_DEVI_DEBUG;
15446 	}
15447 
15448 	switch (op) {
15449 	case BUS_CONFIG_ONE:
15450 		/*
15451 		 * Retry the command since we need to ensure
15452 		 * the fabric devices are available for root
15453 		 */
15454 		while (retry++ < fcp_max_bus_config_retries) {
15455 			rval =	(ndi_busop_bus_config(parent,
15456 			    flag | NDI_MDI_FALLBACK, op,
15457 			    arg, childp, (clock_t)reset_delay));
15458 			if (rval == 0) {
15459 				return (rval);
15460 			}
15461 		}
15462 
15463 		/*
15464 		 * drain taskq to make sure nodes are created and then
15465 		 * try again.
15466 		 */
15467 		taskq_wait(DEVI(parent)->devi_taskq);
15468 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15469 		    op, arg, childp, 0));
15470 
15471 	case BUS_CONFIG_DRIVER:
15472 	case BUS_CONFIG_ALL: {
15473 		/*
15474 		 * delay till all devices report in (port_tmp_cnt == 0)
15475 		 * or FCP_INIT_WAIT_TIMEOUT
15476 		 */
15477 		mutex_enter(&pptr->port_mutex);
15478 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15479 			(void) cv_timedwait(&pptr->port_config_cv,
15480 			    &pptr->port_mutex,
15481 			    ddi_get_lbolt() + (clock_t)reset_delay);
15482 			reset_delay =
15483 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15484 			    (lbolt64 - pptr->port_attach_time);
15485 		}
15486 		mutex_exit(&pptr->port_mutex);
15487 		/* drain taskq to make sure nodes are created */
15488 		taskq_wait(DEVI(parent)->devi_taskq);
15489 		return (ndi_busop_bus_config(parent, flag, op,
15490 		    arg, childp, 0));
15491 	}
15492 
15493 	default:
15494 		return (NDI_FAILURE);
15495 	}
15496 	/*NOTREACHED*/
15497 }
15498 
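/*
 *     Function: fcp_scsi_bus_unconfig
 *
 *  Description: bus_unconfig entry point; forwards the request to
 *		 ndi_busop_bus_unconfig().
 */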
15499 static int
15500 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15501     ddi_bus_config_op_t op, void *arg)
15502 {
15503 	if (fcp_bus_config_debug) {
15504 		flag |= NDI_DEVI_DEBUG;
15505 	}
15506 
15507 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15508 }
15509 
15510 
15511 /*
 * Routine to copy the GUID into the lun structure.
 * Returns 0 if the copy was successful and 1 if a failure was
 * encountered and the guid was not copied.
15515  */
15516 static int
15517 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
{
	int		retval = 0;
	unsigned int	len;

	if ((guidp == NULL) || (plun == NULL)) {
		return (1);
	}

	/* add one for the null terminator */
	len = strlen(guidp) + 1;
15528 
	/*
	 * If plun->lun_guid has already been allocated, check the size.
	 * If the size is an exact match, reuse it; if not, free it and
	 * allocate the required size.  The reallocation should NOT
	 * typically happen unless the GUID reported changes between
	 * passes.  We free and allocate again even if the old size was
	 * larger than required, because the field lun_guid_size serves
	 * the dual role of indicating both the size of the GUID and the
	 * allocation size.
	 */
15541 	if (plun->lun_guid) {
15542 		if (plun->lun_guid_size != len) {
15543 			/*
15544 			 * free the allocated memory and
15545 			 * initialize the field
15546 			 * lun_guid_size to 0.
15547 			 */
15548 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15549 			plun->lun_guid = NULL;
15550 			plun->lun_guid_size = 0;
15551 		}
15552 	}
15553 	/*
15554 	 * alloc only if not already done.
15555 	 */
15556 	if (plun->lun_guid == NULL) {
15557 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15558 		if (plun->lun_guid == NULL) {
			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
			    "unable to allocate memory for GUID, "
			    "size %d", len);
15562 			retval = 1;
15563 		} else {
15564 			plun->lun_guid_size = len;
15565 		}
15566 	}
15567 	if (plun->lun_guid) {
15568 		/*
15569 		 * now copy the GUID
15570 		 */
15571 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15572 	}
15573 	return (retval);
15574 }
15575 
15576 /*
15577  * fcp_reconfig_wait
15578  *
15579  * Wait for a rediscovery/reconfiguration to complete before continuing.
15580  */
15581 
15582 static void
15583 fcp_reconfig_wait(struct fcp_port *pptr)
15584 {
15585 	clock_t		reconfig_start, wait_timeout;
15586 
15587 	/*
15588 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15589 	 * reconfiguration in progress.
15590 	 */
15591 
15592 	mutex_enter(&pptr->port_mutex);
15593 	if (pptr->port_tmp_cnt == 0) {
15594 		mutex_exit(&pptr->port_mutex);
15595 		return;
15596 	}
15597 	mutex_exit(&pptr->port_mutex);
15598 
15599 	/*
15600 	 * If we cause a reconfig by raising power, delay until all devices
15601 	 * report in (port_tmp_cnt returns to 0)
15602 	 */
15603 
15604 	reconfig_start = ddi_get_lbolt();
15605 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15606 
15607 	mutex_enter(&pptr->port_mutex);
15608 
15609 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15610 	    pptr->port_tmp_cnt) {
15611 
15612 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15613 		    reconfig_start + wait_timeout);
15614 	}
15615 
15616 	mutex_exit(&pptr->port_mutex);
15617 
15618 	/*
15619 	 * Even if fcp_tmp_count isn't 0, continue without error.  The port
	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
	 * we want may still be OK.  If not, it will error out later.
15622 }
15623 
15624 /*
15625  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15626  * We rely on the fcp_global_mutex to provide protection against changes to
15627  * the fcp_lun_blacklist.
15628  *
15629  * You can describe a list of target port WWNs and LUN numbers which will
15630  * not be configured. LUN numbers will be interpreted as decimal. White
15631  * spaces and ',' can be used in the list of LUN numbers.
15632  *
15633  * To prevent LUNs 1 and 2 from being configured for target
15634  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15635  *
15636  * pwwn-lun-blacklist=
15637  * "510000f010fd92a1,1,2",
15638  * "510000e012079df1,1,2";
15639  */
15640 static void
15641 fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist)
{
15643 	char **prop_array	= NULL;
15644 	char *curr_pwwn		= NULL;
15645 	char *curr_lun		= NULL;
15646 	uint32_t prop_item	= 0;
15647 	int idx			= 0;
15648 	int len			= 0;
15649 
15650 	ASSERT(mutex_owned(&fcp_global_mutex));
15651 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15652 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15653 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15654 		return;
15655 	}
15656 
15657 	for (idx = 0; idx < prop_item; idx++) {
15658 
15659 		curr_pwwn = prop_array[idx];
15660 		while (*curr_pwwn == ' ') {
15661 			curr_pwwn++;
15662 		}
15663 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15664 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15665 			    ", please check.", curr_pwwn);
15666 			continue;
15667 		}
15668 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15669 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15670 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15671 			    ", please check.", curr_pwwn);
15672 			continue;
15673 		}
15674 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15675 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15676 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15677 				    "blacklist, please check.", curr_pwwn);
15678 				break;
15679 			}
15680 		}
15681 		if (len != sizeof (la_wwn_t) * 2) {
15682 			continue;
15683 		}
15684 
15685 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15686 		*(curr_lun - 1) = '\0';
15687 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15688 	}
15689 
15690 	ddi_prop_free(prop_array);
15691 }
15692 
15693 /*
15694  * Get the masking info about one remote target port designated by wwn.
 * LUN ids can be separated by ',' or white space.
15696  */
15697 static void
15698 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
15700 	int		idx			= 0;
15701 	uint32_t	offset			= 0;
15702 	unsigned long	lun_id			= 0;
15703 	char		lunid_buf[16];
15704 	char		*pend			= NULL;
15705 	int		illegal_digit		= 0;
15706 
15707 	while (offset < strlen(curr_lun)) {
15708 		while ((curr_lun[offset + idx] != ',') &&
15709 		    (curr_lun[offset + idx] != '\0') &&
15710 		    (curr_lun[offset + idx] != ' ')) {
15711 			if (isdigit(curr_lun[offset + idx]) == 0) {
15712 				illegal_digit++;
15713 			}
15714 			idx++;
15715 		}
15716 		if (illegal_digit > 0) {
15717 			offset += (idx+1);	/* To the start of next lun */
15718 			idx = 0;
15719 			illegal_digit = 0;
15720 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15721 			    "the blacklist, please check digits.",
15722 			    curr_lun, curr_pwwn);
15723 			continue;
15724 		}
15725 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15726 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15727 			    "the blacklist, please check the length of LUN#.",
15728 			    curr_lun, curr_pwwn);
15729 			break;
15730 		}
15731 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
15734 		}
15735 
15736 		bcopy(curr_lun + offset, lunid_buf, idx);
15737 		lunid_buf[idx] = '\0';
15738 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15739 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15740 		} else {
15741 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15742 			    "the blacklist, please check %s.",
15743 			    curr_lun, curr_pwwn, lunid_buf);
15744 		}
15745 		offset += (idx+1);	/* To the start of next lun */
15746 		idx = 0;
15747 	}
15748 }
15749 
15750 /*
15751  * Add one masking record
15752  */
15753 static void
15754 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
    struct fcp_black_list_entry **pplun_blacklist)
{
15756 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15757 	struct fcp_black_list_entry	*new_entry	= NULL;
15758 	la_wwn_t			wwn;
15759 
15760 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15761 	while (tmp_entry) {
15762 		if ((bcmp(&tmp_entry->wwn, &wwn,
15763 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15764 			return;
15765 		}
15766 
15767 		tmp_entry = tmp_entry->next;
15768 	}
15769 
15770 	/* add to black list */
	new_entry = kmem_zalloc(sizeof (struct fcp_black_list_entry),
	    KM_SLEEP);
15773 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15774 	new_entry->lun = lun_id;
15775 	new_entry->masked = 0;
15776 	new_entry->next = *pplun_blacklist;
15777 	*pplun_blacklist = new_entry;
15778 }
15779 
15780 /*
15781  * Check if we should mask the specified lun of this fcp_tgt
15782  */
15783 static int
fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
{
15785 	struct fcp_black_list_entry *remote_port;
15786 
15787 	remote_port = fcp_lun_blacklist;
15788 	while (remote_port != NULL) {
15789 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
15790 			if (remote_port->lun == lun_id) {
15791 				remote_port->masked++;
15792 				if (remote_port->masked == 1) {
15793 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
15794 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
15795 					    "is masked due to black listing.\n",
15796 					    lun_id, wwn->raw_wwn[0],
15797 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
15798 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
15799 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
15800 					    wwn->raw_wwn[7]);
15801 				}
15802 				return (TRUE);
15803 			}
15804 		}
15805 		remote_port = remote_port->next;
15806 	}
15807 	return (FALSE);
15808 }
15809 
15810 /*
15811  * Release all allocated resources
15812  */
15813 static void
fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
{
15815 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15816 	struct fcp_black_list_entry	*current_entry	= NULL;
15817 
15818 	ASSERT(mutex_owned(&fcp_global_mutex));
15819 	/*
15820 	 * Traverse all luns
15821 	 */
15822 	while (tmp_entry) {
15823 		current_entry = tmp_entry;
15824 		tmp_entry = tmp_entry->next;
15825 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
15826 	}
15827 	*pplun_blacklist = NULL;
15828 }
15829