1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
25  * Copyright 2019 Joyent, Inc.
26  * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27  * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28  * Copyright 2023 Oxide Computer Company
29  * Copyright 2023 Racktop Systems, Inc.
30  */
31 
32 /*
33  * Copyright (c) 2000 to 2010, LSI Corporation.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms of all code within
37  * this file that is exclusively owned by LSI, with or without
38  * modification, is permitted provided that, in addition to the CDDL 1.0
39  * License requirements, the following conditions are met:
40  *
41  *    Neither the name of the author nor the names of its contributors may be
42  *    used to endorse or promote products derived from this software without
43  *    specific prior written permission.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
48  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
49  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
50  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
51  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
52  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
53  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
54  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
55  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
56  * DAMAGE.
57  */
58 
59 /*
60  * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
61  *
62  */
63 
64 #if defined(lint) || defined(DEBUG)
65 #define	MPTSAS_DEBUG
66 #endif
67 
68 /*
69  * standard header files.
70  */
71 #include <sys/note.h>
72 #include <sys/scsi/scsi.h>
73 #include <sys/pci.h>
74 #include <sys/file.h>
75 #include <sys/policy.h>
76 #include <sys/model.h>
77 #include <sys/refhash.h>
78 #include <sys/sysevent.h>
79 #include <sys/sysevent/eventdefs.h>
80 #include <sys/sysevent/dr.h>
81 #include <sys/sata/sata_defs.h>
82 #include <sys/sata/sata_hba.h>
83 #include <sys/scsi/generic/sas.h>
84 #include <sys/scsi/impl/scsi_sas.h>
85 
86 #pragma pack(1)
87 #include <sys/scsi/adapters/mpi/mpi2_type.h>
88 #include <sys/scsi/adapters/mpi/mpi2.h>
89 #include <sys/scsi/adapters/mpi/mpi2_cnfg.h>
90 #include <sys/scsi/adapters/mpi/mpi2_init.h>
91 #include <sys/scsi/adapters/mpi/mpi2_ioc.h>
92 #include <sys/scsi/adapters/mpi/mpi2_sas.h>
93 #include <sys/scsi/adapters/mpi/mpi2_tool.h>
94 #include <sys/scsi/adapters/mpi/mpi2_raid.h>
95 #pragma pack()
96 
97 /*
98  * private header files.
99  *
100  */
101 #include <sys/scsi/impl/scsi_reset_notify.h>
102 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
103 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
104 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
105 #include <sys/raidioctl.h>
106 
107 #include <sys/fs/dv_node.h>	/* devfs_clean */
108 
109 /*
110  * FMA header files
111  */
112 #include <sys/ddifm.h>
113 #include <sys/fm/protocol.h>
114 #include <sys/fm/util.h>
115 #include <sys/fm/io/ddi.h>
116 
117 /*
118  * autoconfiguration data and routines.
119  */
120 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
121 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
122 static int mptsas_power(dev_info_t *dip, int component, int level);
123 
124 /*
125  * cb_ops function
126  */
127 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
128 	cred_t *credp, int *rval);
129 #ifdef __sparc
130 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
131 #else  /* __sparc */
132 static int mptsas_quiesce(dev_info_t *devi);
133 #endif	/* __sparc */
134 
135 /*
136  * ddi_ufm_ops
137  */
138 static int mptsas_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg,
139     uint_t imgno, ddi_ufm_image_t *img);
140 static int mptsas_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg,
141     uint_t imgno, uint_t slotno, ddi_ufm_slot_t *slot);
142 static int mptsas_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg,
143     ddi_ufm_cap_t *caps);
144 
145 /*
146  * Resource initialization for hardware
147  */
148 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
149 static void mptsas_disable_bus_master(mptsas_t *mpt);
150 static void mptsas_hba_fini(mptsas_t *mpt);
151 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
152 static int mptsas_hba_setup(mptsas_t *mpt);
153 static void mptsas_hba_teardown(mptsas_t *mpt);
154 static int mptsas_config_space_init(mptsas_t *mpt);
155 static void mptsas_config_space_fini(mptsas_t *mpt);
156 static void mptsas_iport_register(mptsas_t *mpt);
157 static int mptsas_smp_setup(mptsas_t *mpt);
158 static void mptsas_smp_teardown(mptsas_t *mpt);
159 static int mptsas_enc_setup(mptsas_t *mpt);
160 static void mptsas_enc_teardown(mptsas_t *mpt);
161 static int mptsas_cache_create(mptsas_t *mpt);
162 static void mptsas_cache_destroy(mptsas_t *mpt);
163 static int mptsas_alloc_request_frames(mptsas_t *mpt);
164 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
165 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
166 static int mptsas_alloc_free_queue(mptsas_t *mpt);
167 static int mptsas_alloc_post_queue(mptsas_t *mpt);
168 static void mptsas_alloc_reply_args(mptsas_t *mpt);
169 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
170 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
171 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
172 static void mptsas_update_hashtab(mptsas_t *mpt);
173 
174 /*
175  * SCSA function prototypes
176  */
177 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
178 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
179 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
180 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
181 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
182     int tgtonly);
183 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
184 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
185     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
186 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
187 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
188 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
189     struct scsi_pkt *pkt);
190 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
191     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
192 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
193     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
194 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
195     void (*callback)(caddr_t), caddr_t arg);
196 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
197 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
198 static int mptsas_scsi_quiesce(dev_info_t *dip);
199 static int mptsas_scsi_unquiesce(dev_info_t *dip);
200 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
201     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
202 
203 /*
204  * SMP functions
205  */
206 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
207 
208 /*
209  * internal function prototypes.
210  */
211 static void mptsas_list_add(mptsas_t *mpt);
212 static void mptsas_list_del(mptsas_t *mpt);
213 
214 static int mptsas_quiesce_bus(mptsas_t *mpt);
215 static int mptsas_unquiesce_bus(mptsas_t *mpt);
216 
217 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
218 static void mptsas_free_handshake_msg(mptsas_t *mpt);
219 
220 static void mptsas_ncmds_checkdrain(void *arg);
221 
222 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
223 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
224 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
225 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
226 
227 static int mptsas_do_detach(dev_info_t *dev);
228 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
229 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
230     struct scsi_pkt *pkt);
231 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
232 
233 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
234 static void mptsas_handle_event(void *args);
235 static int mptsas_handle_event_sync(void *args);
236 static void mptsas_handle_dr(void *args);
237 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
238     dev_info_t *pdip);
239 
240 static void mptsas_restart_cmd(void *);
241 
242 static void mptsas_flush_hba(mptsas_t *mpt);
243 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
244 	uint8_t tasktype);
245 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
246     uchar_t reason, uint_t stat);
247 
248 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
249 static void mptsas_process_intr(mptsas_t *mpt,
250     pMpi2ReplyDescriptorsUnion_t reply_desc_union);
251 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
252     pMpi2ReplyDescriptorsUnion_t reply_desc);
253 static void mptsas_handle_address_reply(mptsas_t *mpt,
254     pMpi2ReplyDescriptorsUnion_t reply_desc);
255 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
256 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
257     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
258 
259 static void mptsas_watch(void *arg);
260 static void mptsas_watchsubr(mptsas_t *mpt);
261 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
262 
263 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
264 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
265     uint8_t *data, uint32_t request_size, uint32_t reply_size,
266     uint32_t data_size, uint32_t direction, uint8_t *dataout,
267     uint32_t dataout_size, short timeout, int mode);
268 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
269 
270 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
271     uint32_t unique_id);
272 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
273 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
274     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
275 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
276     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
277     uint32_t diag_type);
278 static int mptsas_diag_register(mptsas_t *mpt,
279     mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
280 static int mptsas_diag_unregister(mptsas_t *mpt,
281     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
282 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
283     uint32_t *return_code);
284 static int mptsas_diag_read_buffer(mptsas_t *mpt,
285     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
286     uint32_t *return_code, int ioctl_mode);
287 static int mptsas_diag_release(mptsas_t *mpt,
288     mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
289 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
290     uint8_t *diag_action, uint32_t length, uint32_t *return_code,
291     int ioctl_mode);
292 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
293     int mode);
294 
295 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
296     int cmdlen, int tgtlen, int statuslen, int kf);
297 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
298 
299 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
300 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
301 
302 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
303     int kmflags);
304 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
305 
306 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
307     mptsas_cmd_t *cmd);
308 static void mptsas_check_task_mgt(mptsas_t *mpt,
309     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
310 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
311     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
312     int *resid);
313 
314 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
315 static void mptsas_free_active_slots(mptsas_t *mpt);
316 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
317 
318 static void mptsas_restart_hba(mptsas_t *mpt);
319 static void mptsas_restart_waitq(mptsas_t *mpt);
320 
321 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
322 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
323 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
324 
325 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
326 static void mptsas_doneq_empty(mptsas_t *mpt);
327 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
328 
329 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
330 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
331 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
332 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
333 
334 
335 static void mptsas_start_watch_reset_delay();
336 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
337 static void mptsas_watch_reset_delay(void *arg);
338 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
339 
340 /*
341  * helper functions
342  */
343 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
344 
345 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
346 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
347 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
348     int lun);
349 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
350     int lun);
351 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
352 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
353 
354 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
355     int *lun);
356 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
357 
358 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
359     mptsas_phymask_t phymask, uint8_t phy);
360 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
361     mptsas_phymask_t phymask, uint64_t wwid);
362 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
363     mptsas_phymask_t phymask, uint64_t wwid);
364 
365 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
366     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
367 
368 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
369     uint16_t *handle, mptsas_target_t **pptgt);
370 static void mptsas_update_phymask(mptsas_t *mpt);
371 
372 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep,
373     uint16_t idx);
374 static int mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
375     uint32_t *status, uint8_t cmd);
376 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
377     mptsas_phymask_t *phymask);
378 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
379     mptsas_phymask_t phymask);
380 
381 
382 /*
383  * Enumeration / DR functions
384  */
385 static void mptsas_config_all(dev_info_t *pdip);
386 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
387     dev_info_t **lundip);
388 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
389     dev_info_t **lundip);
390 
391 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
392 static int mptsas_offline_target(dev_info_t *pdip, char *name);
393 
394 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
395     dev_info_t **dip);
396 
397 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
398 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
399     dev_info_t **dip, mptsas_target_t *ptgt);
400 
401 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
402     dev_info_t **dip, mptsas_target_t *ptgt, int lun);
403 
404 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
405     char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
406 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
407     char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
408     int lun);
409 
410 static void mptsas_offline_missed_luns(dev_info_t *pdip,
411     uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
412 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
413     mdi_pathinfo_t *rpip, uint_t flags);
414 
415 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
416     dev_info_t **smp_dip);
417 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
418     uint_t flags);
419 
420 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
421     int mode, int *rval);
422 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
423     int mode, int *rval);
424 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
425     int mode, int *rval);
426 static void mptsas_record_event(void *args);
427 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
428     int mode);
429 
430 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
431     uint32_t, mptsas_phymask_t, uint8_t);
432 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
433 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
434     dev_info_t **smp_dip);
435 
436 /*
437  * Power management functions
438  */
439 static int mptsas_get_pci_cap(mptsas_t *mpt);
440 static int mptsas_init_pm(mptsas_t *mpt);
441 
442 /*
443  * MPT MSI tunable:
444  *
445  * By default MSI is enabled on all supported platforms.
446  */
447 boolean_t mptsas_enable_msi = B_TRUE;
448 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
449 
450 /*
451  * Global switch for use of MPI2.5 FAST PATH.
452  * We don't really know what FAST PATH actually does, so if it is suspected
453  * to cause problems it can be turned off by setting this variable to B_FALSE.
454  */
455 boolean_t mptsas_use_fastpath = B_TRUE;
456 
457 static int mptsas_register_intrs(mptsas_t *);
458 static void mptsas_unregister_intrs(mptsas_t *);
459 static int mptsas_add_intrs(mptsas_t *, int);
460 static void mptsas_rem_intrs(mptsas_t *);
461 
462 /*
463  * FMA Prototypes
464  */
465 static void mptsas_fm_init(mptsas_t *mpt);
466 static void mptsas_fm_fini(mptsas_t *mpt);
467 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
468 
469 extern pri_t minclsyspri, maxclsyspri;
470 
471 /*
472  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
473  * under this device that the paths to a physical device are created when
474  * MPxIO is used.
475  */
476 extern dev_info_t	*scsi_vhci_dip;
477 
478 /*
479  * Tunable timeout value for Inquiry VPD page 0x83
480  * By default the value is 30 seconds.
481  */
482 int mptsas_inq83_retry_timeout = 30;
483 
484 /*
485  * This is used to allocate memory for message frame storage, not for
486  * data I/O DMA. All message frames must be stored in the first 4G of
487  * physical memory.
488  */
489 ddi_dma_attr_t mptsas_dma_attrs = {
490 	DMA_ATTR_V0,	/* attribute layout version		*/
491 	0x0ull,		/* address low - should be 0 (longlong)	*/
492 	0xffffffffull,	/* address high - 32-bit max range	*/
493 	0x00ffffffull,	/* count max - max DMA object size	*/
494 	4,		/* allocation alignment requirements	*/
495 	0x78,		/* burstsizes - binary encoded values	*/
496 	1,		/* minxfer - gran. of DMA engine	*/
497 	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
498 	0xffffffffull,	/* max segment size (DMA boundary)	*/
499 	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
500 	512,		/* granularity - device transfer size	*/
501 	0		/* flags, set to 0			*/
502 };
503 
504 /*
505  * This is used for data I/O DMA memory allocation. (full 64-bit DMA
506  * physical addresses are supported.)
507  */
508 ddi_dma_attr_t mptsas_dma_attrs64 = {
509 	DMA_ATTR_V0,	/* attribute layout version		*/
510 	0x0ull,		/* address low - should be 0 (longlong)	*/
511 	0xffffffffffffffffull,	/* address high - 64-bit max	*/
512 	0x00ffffffull,	/* count max - max DMA object size	*/
513 	4,		/* allocation alignment requirements	*/
514 	0x78,		/* burstsizes - binary encoded values	*/
515 	1,		/* minxfer - gran. of DMA engine	*/
516 	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
517 	0xffffffffull,	/* max segment size (DMA boundary)	*/
518 	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
519 	512,		/* granularity - device transfer size	*/
520 	0		/* flags, set to 0 */
521 };
522 
523 ddi_device_acc_attr_t mptsas_dev_attr = {
524 	DDI_DEVICE_ATTR_V1,
525 	DDI_STRUCTURE_LE_ACC,
526 	DDI_STRICTORDER_ACC,
527 	DDI_DEFAULT_ACC
528 };
529 
530 static struct cb_ops mptsas_cb_ops = {
531 	scsi_hba_open,		/* open */
532 	scsi_hba_close,		/* close */
533 	nodev,			/* strategy */
534 	nodev,			/* print */
535 	nodev,			/* dump */
536 	nodev,			/* read */
537 	nodev,			/* write */
538 	mptsas_ioctl,		/* ioctl */
539 	nodev,			/* devmap */
540 	nodev,			/* mmap */
541 	nodev,			/* segmap */
542 	nochpoll,		/* chpoll */
543 	ddi_prop_op,		/* cb_prop_op */
544 	NULL,			/* streamtab */
545 	D_MP,			/* cb_flag */
546 	CB_REV,			/* rev */
547 	nodev,			/* aread */
548 	nodev			/* awrite */
549 };
550 
551 static struct dev_ops mptsas_ops = {
552 	DEVO_REV,		/* devo_rev, */
553 	0,			/* refcnt  */
554 	ddi_no_info,		/* info */
555 	nulldev,		/* identify */
556 	nulldev,		/* probe */
557 	mptsas_attach,		/* attach */
558 	mptsas_detach,		/* detach */
559 #ifdef  __sparc
560 	mptsas_reset,
561 #else
562 	nodev,			/* reset */
563 #endif  /* __sparc */
564 	&mptsas_cb_ops,		/* driver operations */
565 	NULL,			/* bus operations */
566 	mptsas_power,		/* power management */
567 #ifdef	__sparc
568 	ddi_quiesce_not_needed
569 #else
570 	mptsas_quiesce		/* quiesce */
571 #endif	/* __sparc */
572 };
573 
574 static ddi_ufm_ops_t mptsas_ufm_ops = {
575 	NULL,
576 	mptsas_ufm_fill_image,
577 	mptsas_ufm_fill_slot,
578 	mptsas_ufm_getcaps
579 };
580 
581 #define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
582 
583 static struct modldrv modldrv = {
584 	&mod_driverops,	/* Type of module. This one is a driver */
585 	MPTSAS_MOD_STRING, /* Name of the module. */
586 	&mptsas_ops,	/* driver ops */
587 };
588 
589 static struct modlinkage modlinkage = {
590 	MODREV_1, &modldrv, NULL
591 };
592 #define	TARGET_PROP	"target"
593 #define	LUN_PROP	"lun"
594 #define	LUN64_PROP	"lun64"
595 #define	SAS_PROP	"sas-mpt"
596 #define	MDI_GUID	"wwn"
597 #define	NDI_GUID	"guid"
598 #define	MPTSAS_DEV_GONE	"mptsas_dev_gone"
599 
600 /*
601  * Local static data
602  */
603 #if defined(MPTSAS_DEBUG)
604 /*
605  * Flags to indicate which debug messages are to be printed and which go to the
606  * debug log ring buffer. Default is to not print anything, and to log
607  * everything except the watchsubr() output which normally happens every second.
608  */
609 uint32_t mptsas_debugprt_flags = 0x0;
610 uint32_t mptsas_debuglog_flags = ~(1U << 30);
611 #endif	/* defined(MPTSAS_DEBUG) */
612 uint32_t mptsas_debug_resets = 0;
613 
614 static kmutex_t		mptsas_global_mutex;
615 static void		*mptsas_state;		/* soft	state ptr */
616 static krwlock_t	mptsas_global_rwlock;
617 
618 static kmutex_t		mptsas_log_mutex;
619 static char		mptsas_log_buf[256];
620 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
621 
622 static mptsas_t *mptsas_head, *mptsas_tail;
623 static clock_t mptsas_scsi_watchdog_tick;
624 static clock_t mptsas_tick;
625 static timeout_id_t mptsas_reset_watch;
626 static timeout_id_t mptsas_timeout_id;
627 static int mptsas_timeouts_enabled = 0;
628 
629 /*
630  * Default length for extended auto request sense buffers.
631  * All sense buffers need to come from the same allocation because there
632  * is only one common top-32-bits (of 64 bits) address register.
633  * Most requests only require 32 bytes, but some require more than 256.
634  * We use rmalloc()/rmfree() on this additional memory to manage the
635  * "extended" requests.
636  */
637 int mptsas_extreq_sense_bufsize = 256*64;
638 
639 /*
640  * We believe that all software restrictions of having to run with DMA
641  * attributes that limit allocation to the first 4G are removed.
642  * However, this flag remains to enable a quick switch back should
643  * suspicious problems emerge.
644  * Note that scsi_alloc_consistent_buf() does still adhere to allocating
645  * 32 bit addressable memory, but we can cope if that is changed now.
646  */
647 int mptsas_use_64bit_msgaddr = 1;
648 
649 /*
650  * warlock directives
651  */
652 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
653 	mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
654 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
655 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
656 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
657 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
658 
659 /*
660  * SM - HBA statics
661  */
662 char	*mptsas_driver_rev = MPTSAS_MOD_STRING;
663 
664 #ifdef MPTSAS_DEBUG
665 void debug_enter(char *);
666 #endif
667 
668 /*
669  * Notes:
670  *	- scsi_hba_init(9F) initializes SCSI HBA modules
671  *	- must call scsi_hba_fini(9F) if modload() fails
672  */
673 int
674 _init(void)
675 {
676 	int status;
677 	/* CONSTCOND */
678 	ASSERT(NO_COMPETING_THREADS);
679 
680 	NDBG0(("_init"));
681 
682 	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
683 	    MPTSAS_INITIAL_SOFT_SPACE);
684 	if (status != 0) {
685 		return (status);
686 	}
687 
688 	if ((status = scsi_hba_init(&modlinkage)) != 0) {
689 		ddi_soft_state_fini(&mptsas_state);
690 		return (status);
691 	}
692 
693 	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
694 	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
695 	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
696 
697 	if ((status = mod_install(&modlinkage)) != 0) {
698 		mutex_destroy(&mptsas_log_mutex);
699 		rw_destroy(&mptsas_global_rwlock);
700 		mutex_destroy(&mptsas_global_mutex);
701 		ddi_soft_state_fini(&mptsas_state);
702 		scsi_hba_fini(&modlinkage);
703 	}
704 
705 	return (status);
706 }
707 
708 /*
709  * Notes:
710  *	- scsi_hba_fini(9F) uninitializes SCSI HBA modules
711  */
712 int
713 _fini(void)
714 {
715 	int	status;
716 	/* CONSTCOND */
717 	ASSERT(NO_COMPETING_THREADS);
718 
719 	NDBG0(("_fini"));
720 
721 	if ((status = mod_remove(&modlinkage)) == 0) {
722 		ddi_soft_state_fini(&mptsas_state);
723 		scsi_hba_fini(&modlinkage);
724 		mutex_destroy(&mptsas_global_mutex);
725 		rw_destroy(&mptsas_global_rwlock);
726 		mutex_destroy(&mptsas_log_mutex);
727 	}
728 	return (status);
729 }
730 
731 /*
732  * The loadable-module _info(9E) entry point
733  */
734 int
735 _info(struct modinfo *modinfop)
736 {
737 	/* CONSTCOND */
738 	ASSERT(NO_COMPETING_THREADS);
739 	NDBG0(("mptsas _info"));
740 
741 	return (mod_info(&modlinkage, modinfop));
742 }
743 
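/*
 * refhash evaluation callback: match a target entry against the device
 * handle supplied by the caller.
 */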
744 static int
745 mptsas_target_eval_devhdl(const void *op, void *arg)
746 {
747 	uint16_t dh = *(uint16_t *)arg;
748 	const mptsas_target_t *tp = op;
749 
750 	return ((int)tp->m_devhdl - (int)dh);
751 }
752 
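/*
 * refhash evaluation callback: match a target that has no WWN by its
 * PHY number.
 */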
753 static int
754 mptsas_target_eval_nowwn(const void *op, void *arg)
755 {
756 	uint8_t phy = *(uint8_t *)arg;
757 	const mptsas_target_t *tp = op;
758 
759 	if (tp->m_addr.mta_wwn != 0)
760 		return (-1);
761 
762 	return ((int)tp->m_phynum - (int)phy);
763 }
764 
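/*
 * refhash evaluation callback: match an SMP (expander) entry against a
 * device handle.
 */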
765 static int
766 mptsas_smp_eval_devhdl(const void *op, void *arg)
767 {
768 	uint16_t dh = *(uint16_t *)arg;
769 	const mptsas_smp_t *sp = op;
770 
771 	return ((int)sp->m_devhdl - (int)dh);
772 }
773 
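/*
 * Hash a target address by folding the low 48 bits of the SAS WWN
 * together with the 16-bit phymask placed in the upper bits:
 *
 *	63           48 47                            0
 *	+--------------+------------------------------+
 *	|   phymask    |       WWN (low 48 bits)      |
 *	+--------------+------------------------------+
 */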
774 static uint64_t
775 mptsas_target_addr_hash(const void *tp)
776 {
777 	const mptsas_target_addr_t *tap = tp;
778 
779 	return ((tap->mta_wwn & 0xffffffffffffULL) |
780 	    ((uint64_t)tap->mta_phymask << 48));
781 }
782 
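/*
 * Compare two target addresses: order by WWN first, then by phymask.
 */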
783 static int
784 mptsas_target_addr_cmp(const void *a, const void *b)
785 {
786 	const mptsas_target_addr_t *aap = a;
787 	const mptsas_target_addr_t *bap = b;
788 
789 	if (aap->mta_wwn < bap->mta_wwn)
790 		return (-1);
791 	if (aap->mta_wwn > bap->mta_wwn)
792 		return (1);
793 	return ((int)bap->mta_phymask - (int)aap->mta_phymask);
794 }
795 
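/*
 * Temporary targets are keyed by the address of the target structure
 * itself, so the hash is just the pointer value and the comparison
 * simply orders the pointers.
 */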
796 static uint64_t
797 mptsas_tmp_target_hash(const void *tp)
798 {
799 	return ((uint64_t)(uintptr_t)tp);
800 }
801 
802 static int
803 mptsas_tmp_target_cmp(const void *a, const void *b)
804 {
805 	if (a > b)
806 		return (1);
807 	if (a < b)
808 		return (-1);
809 
810 	return (0);
811 }
812 
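/*
 * Destructor callbacks for the target and SMP refhash tables.
 */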
813 static void
814 mptsas_target_free(void *op)
815 {
816 	kmem_free(op, sizeof (mptsas_target_t));
817 }
818 
819 static void
820 mptsas_smp_free(void *op)
821 {
822 	kmem_free(op, sizeof (mptsas_smp_t));
823 }
824 
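/*
 * Remove any remaining entries from the target and SMP hash tables,
 * then destroy the hash tables themselves.
 */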
825 static void
826 mptsas_destroy_hashes(mptsas_t *mpt)
827 {
828 	mptsas_target_t *tp;
829 	mptsas_smp_t *sp;
830 
831 	for (tp = refhash_first(mpt->m_targets); tp != NULL;
832 	    tp = refhash_next(mpt->m_targets, tp)) {
833 		refhash_remove(mpt->m_targets, tp);
834 	}
835 	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
836 	    sp = refhash_next(mpt->m_smp_targets, sp)) {
837 		refhash_remove(mpt->m_smp_targets, sp);
838 	}
839 	refhash_destroy(mpt->m_tmp_targets);
840 	refhash_destroy(mpt->m_targets);
841 	refhash_destroy(mpt->m_smp_targets);
842 	mpt->m_targets = NULL;
843 	mpt->m_smp_targets = NULL;
844 }
845 
846 static int
847 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
848 {
849 	dev_info_t		*pdip;
850 	mptsas_t		*mpt;
851 	scsi_hba_tran_t		*hba_tran;
852 	char			*iport = NULL;
853 	char			phymask[MPTSAS_MAX_PHYS];
854 	mptsas_phymask_t	phy_mask = 0;
855 	int			dynamic_port = 0;
856 	uint32_t		page_address;
857 	char			initiator_wwnstr[MPTSAS_WWN_STRLEN];
858 	int			rval = DDI_FAILURE;
859 	int			i = 0;
860 	uint8_t			numphys = 0;
861 	uint8_t			phy_id;
862 	uint8_t			phy_port = 0;
863 	uint16_t		attached_devhdl = 0;
864 	uint32_t		dev_info;
865 	uint64_t		attached_sas_wwn;
866 	uint16_t		dev_hdl;
867 	uint16_t		pdev_hdl;
868 	uint16_t		bay_num, enclosure, io_flags;
869 	char			attached_wwnstr[MPTSAS_WWN_STRLEN];
870 
871 	/* CONSTCOND */
872 	ASSERT(NO_COMPETING_THREADS);
873 
874 	switch (cmd) {
875 	case DDI_ATTACH:
876 		break;
877 
878 	case DDI_RESUME:
879 		/*
880 		 * If this is a scsi-iport node, there is nothing to do here.
881 		 */
882 		return (DDI_SUCCESS);
883 
884 	default:
885 		return (DDI_FAILURE);
886 	}
887 
888 	pdip = ddi_get_parent(dip);
889 
890 	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
891 	    NULL) {
892 		cmn_err(CE_WARN, "Failed to attach iport: could not "
893 		    "get tran vector for the HBA node");
894 		return (DDI_FAILURE);
895 	}
896 
897 	mpt = TRAN2MPT(hba_tran);
898 	ASSERT(mpt != NULL);
899 	if (mpt == NULL)
900 		return (DDI_FAILURE);
901 
902 	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
903 	    NULL) {
904 		mptsas_log(mpt, CE_WARN, "Failed to attach iport: could not "
905 		    "get tran vector for the iport node");
906 		return (DDI_FAILURE);
907 	}
908 
909 	/*
910 	 * Overwrite parent's tran_hba_private to iport's tran vector
911 	 */
912 	hba_tran->tran_hba_private = mpt;
913 
914 	ddi_report_dev(dip);
915 
916 	/*
917 	 * Get SAS address for initiator port according to dev_handle
918 	 */
919 	iport = ddi_get_name_addr(dip);
920 	if (iport && strncmp(iport, "v0", 2) == 0) {
921 		if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
922 		    MPTSAS_VIRTUAL_PORT, 1) !=
923 		    DDI_PROP_SUCCESS) {
924 			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
925 			    MPTSAS_VIRTUAL_PORT);
926 			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
927 			    "prop update failed");
928 			return (DDI_FAILURE);
929 		}
930 		return (DDI_SUCCESS);
931 	}
932 
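	/*
	 * Find the PHY mask whose hex string representation matches this
	 * iport's unit address.
	 */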
933 	mutex_enter(&mpt->m_mutex);
934 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
935 		bzero(phymask, sizeof (phymask));
936 		(void) sprintf(phymask,
937 		    "%x", mpt->m_phy_info[i].phy_mask);
938 		if (strcmp(phymask, iport) == 0) {
939 			break;
940 		}
941 	}
942 
943 	if (i == MPTSAS_MAX_PHYS) {
944 		mptsas_log(mpt, CE_WARN, "Failed to attach port %s because the "
945 		    "port does not seem to exist", iport);
946 		mutex_exit(&mpt->m_mutex);
947 		return (DDI_FAILURE);
948 	}
949 
950 	phy_mask = mpt->m_phy_info[i].phy_mask;
951 
952 	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
953 		dynamic_port = 1;
954 	else
955 		dynamic_port = 0;
956 
957 	/*
958 	 * Update PHY info for smhba
959 	 */
960 	if (mptsas_smhba_phy_init(mpt)) {
961 		mutex_exit(&mpt->m_mutex);
962 		mptsas_log(mpt, CE_WARN, "mptsas phy update "
963 		    "failed");
964 		return (DDI_FAILURE);
965 	}
966 
967 	mutex_exit(&mpt->m_mutex);
968 
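	/*
	 * Count the PHYs that belong to this iport's PHY mask.
	 */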
969 	numphys = 0;
970 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
971 		if ((phy_mask >> i) & 0x01) {
972 			numphys++;
973 		}
974 	}
975 
976 	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
977 	(void) sprintf(initiator_wwnstr, "w%016"PRIx64,
978 	    mpt->un.m_base_wwid);
979 
980 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
981 	    SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
982 	    DDI_PROP_SUCCESS) {
983 		(void) ddi_prop_remove(DDI_DEV_T_NONE,
984 		    dip, SCSI_ADDR_PROP_INITIATOR_PORT);
985 		mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
986 		    "prop update failed");
987 		return (DDI_FAILURE);
988 	}
989 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
990 	    MPTSAS_NUM_PHYS, numphys) !=
991 	    DDI_PROP_SUCCESS) {
992 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
993 		return (DDI_FAILURE);
994 	}
995 
996 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
997 	    "phymask", phy_mask) !=
998 	    DDI_PROP_SUCCESS) {
999 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
1000 		mptsas_log(mpt, CE_WARN, "mptsas phy mask "
1001 		    "prop update failed");
1002 		return (DDI_FAILURE);
1003 	}
1004 
1005 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
1006 	    "dynamic-port", dynamic_port) !=
1007 	    DDI_PROP_SUCCESS) {
1008 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
1009 		mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
1010 		    "prop update failed");
1011 		return (DDI_FAILURE);
1012 	}
1013 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
1014 	    MPTSAS_VIRTUAL_PORT, 0) !=
1015 	    DDI_PROP_SUCCESS) {
1016 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
1017 		    MPTSAS_VIRTUAL_PORT);
1018 		mptsas_log(mpt, CE_WARN, "mptsas virtual port "
1019 		    "prop update failed");
1020 		return (DDI_FAILURE);
1021 	}
1022 	mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
1023 	    &attached_devhdl);
1024 
1025 	mutex_enter(&mpt->m_mutex);
1026 	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
1027 	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
1028 	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
1029 	    &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
1030 	    &pdev_hdl, &bay_num, &enclosure, &io_flags);
1031 	if (rval != DDI_SUCCESS) {
1032 		mptsas_log(mpt, CE_WARN,
1033 		    "Failed to get device page0 for handle:%d",
1034 		    attached_devhdl);
1035 		mutex_exit(&mpt->m_mutex);
1036 		return (DDI_FAILURE);
1037 	}
1038 
1039 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1040 		bzero(phymask, sizeof (phymask));
1041 		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
1042 		if (strcmp(phymask, iport) == 0) {
1043 			(void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
1044 			    "%x",
1045 			    mpt->m_phy_info[i].phy_mask);
1046 		}
1047 	}
1048 	mutex_exit(&mpt->m_mutex);
1049 
1050 	bzero(attached_wwnstr, sizeof (attached_wwnstr));
1051 	(void) sprintf(attached_wwnstr, "w%016"PRIx64,
1052 	    attached_sas_wwn);
1053 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1054 	    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1055 	    DDI_PROP_SUCCESS) {
1056 		(void) ddi_prop_remove(DDI_DEV_T_NONE,
1057 		    dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1058 		return (DDI_FAILURE);
1059 	}
1060 
1061 	/* Create kstats for each phy on this iport */
1062 
1063 	mptsas_create_phy_stats(mpt, iport, dip);
1064 
1065 	/*
1066 	 * register sas hba iport with mdi (MPxIO/vhci)
1067 	 */
1068 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1069 	    dip, 0) == MDI_SUCCESS) {
1070 		mpt->m_mpxio_enable = TRUE;
1071 	}
1072 	return (DDI_SUCCESS);
1073 }
1074 
1075 /*
1076  * Notes:
1077  *	Set up all device state and allocate data structures,
1078  *	mutexes, condition variables, etc. for device operation.
1079  *	Add interrupts needed.
1080  *	Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1081  */
1082 static int
1083 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1084 {
1085 	mptsas_t		*mpt = NULL;
1086 	int			instance, i, j;
1087 	int			doneq_thread_num;
1088 	char			intr_added = 0;
1089 	char			map_setup = 0;
1090 	char			config_setup = 0;
1091 	char			hba_attach_setup = 0;
1092 	char			smp_attach_setup = 0;
1093 	char			enc_attach_setup = 0;
1094 	char			mutex_init_done = 0;
1095 	char			event_taskq_create = 0;
1096 	char			dr_taskq_create = 0;
1097 	char			doneq_thread_create = 0;
1098 	char			added_watchdog = 0;
1099 	scsi_hba_tran_t		*hba_tran;
1100 	uint_t			mem_bar = MEM_SPACE;
1101 	int			rval = DDI_FAILURE;
1102 
1103 	/* CONSTCOND */
1104 	ASSERT(NO_COMPETING_THREADS);
1105 
1106 	if (scsi_hba_iport_unit_address(dip)) {
1107 		return (mptsas_iport_attach(dip, cmd));
1108 	}
1109 
1110 	switch (cmd) {
1111 	case DDI_ATTACH:
1112 		break;
1113 
1114 	case DDI_RESUME:
1115 		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1116 			return (DDI_FAILURE);
1117 
1118 		mpt = TRAN2MPT(hba_tran);
1119 
1120 		if (!mpt) {
1121 			return (DDI_FAILURE);
1122 		}
1123 
1124 		/*
1125 		 * Reset hardware and softc to "no outstanding commands"
1126 		 * Note	that a check condition can result on first command
1127 		 * to a	target.
1128 		 */
1129 		mutex_enter(&mpt->m_mutex);
1130 
1131 		/*
1132 		 * raise power.
1133 		 */
1134 		if (mpt->m_options & MPTSAS_OPT_PM) {
1135 			mutex_exit(&mpt->m_mutex);
1136 			(void) pm_busy_component(dip, 0);
1137 			rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1138 			if (rval == DDI_SUCCESS) {
1139 				mutex_enter(&mpt->m_mutex);
1140 			} else {
1141 				/*
1142 				 * The pm_power_has_changed() call above failed,
1143 				 * and that can only occur if we were unable
1144 				 * to reset the hardware.  This is probably
1145 				 * due to unhealthy hardware, and because
1146 				 * important filesystems (such as the root
1147 				 * filesystem) could be on the attached disks,
1148 				 * it would not be a good idea to continue,
1149 				 * as we won't be entirely certain we are
1150 				 * writing correct data.  So we panic() here
1151 				 * to not only prevent possible data corruption,
1152 				 * but to give developers or end users a hope
1153 				 * of identifying and correcting any problems.
1154 				 */
1155 				fm_panic("mptsas could not reset hardware "
1156 				    "during resume");
1157 			}
1158 		}
1159 
1160 		mpt->m_suspended = 0;
1161 
1162 		/*
1163 		 * Reinitialize ioc
1164 		 */
1165 		mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1166 		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1167 			mutex_exit(&mpt->m_mutex);
1168 			if (mpt->m_options & MPTSAS_OPT_PM) {
1169 				(void) pm_idle_component(dip, 0);
1170 			}
1171 			fm_panic("mptsas init chip fail during resume");
1172 		}
1173 		/*
1174 		 * mptsas_update_driver_data needs interrupts so enable them
1175 		 * first.
1176 		 */
1177 		MPTSAS_ENABLE_INTR(mpt);
1178 		mptsas_update_driver_data(mpt);
1179 
1180 		/* start requests, if possible */
1181 		mptsas_restart_hba(mpt);
1182 
1183 		mutex_exit(&mpt->m_mutex);
1184 
1185 		/*
1186 		 * Restart watch thread
1187 		 */
1188 		mutex_enter(&mptsas_global_mutex);
1189 		if (mptsas_timeout_id == 0) {
1190 			mptsas_timeout_id = timeout(mptsas_watch, NULL,
1191 			    mptsas_tick);
1192 			mptsas_timeouts_enabled = 1;
1193 		}
1194 		mutex_exit(&mptsas_global_mutex);
1195 
1196 		/* report idle status to pm framework */
1197 		if (mpt->m_options & MPTSAS_OPT_PM) {
1198 			(void) pm_idle_component(dip, 0);
1199 		}
1200 
1201 		return (DDI_SUCCESS);
1202 
1203 	default:
1204 		return (DDI_FAILURE);
1205 
1206 	}
1207 
1208 	instance = ddi_get_instance(dip);
1209 
1210 	/*
1211 	 * Allocate softc information.
1212 	 */
1213 	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1214 		mptsas_log(NULL, CE_WARN,
1215 		    "mptsas%d: cannot allocate soft state", instance);
1216 		goto fail;
1217 	}
1218 
1219 	mpt = ddi_get_soft_state(mptsas_state, instance);
1220 
1221 	if (mpt == NULL) {
1222 		mptsas_log(NULL, CE_WARN,
1223 		    "mptsas%d: cannot get soft state", instance);
1224 		goto fail;
1225 	}
1226 
1227 	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1228 	scsi_size_clean(dip);
1229 
1230 	mpt->m_dip = dip;
1231 	mpt->m_instance = instance;
1232 
1233 	/* Make a per-instance copy of the structures */
1234 	mpt->m_io_dma_attr = mptsas_dma_attrs64;
1235 	if (mptsas_use_64bit_msgaddr) {
1236 		mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1237 	} else {
1238 		mpt->m_msg_dma_attr = mptsas_dma_attrs;
1239 	}
1240 	mpt->m_reg_acc_attr = mptsas_dev_attr;
1241 	mpt->m_dev_acc_attr = mptsas_dev_attr;
1242 
1243 	/*
1244 	 * Size of individual request sense buffer
1245 	 */
1246 	mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;
1247 
1248 	/*
1249 	 * Initialize FMA
1250 	 */
1251 	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1252 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1253 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1254 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1255 
1256 	mptsas_fm_init(mpt);
1257 
1258 	/*
1259 	 * Initialize us with the UFM subsystem
1260 	 */
1261 	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &mptsas_ufm_ops,
1262 	    &mpt->m_ufmh, mpt) != 0) {
1263 		mptsas_log(mpt, CE_WARN, "failed to initialize UFM subsystem");
1264 		goto fail;
1265 	}
1266 
1267 	if (mptsas_alloc_handshake_msg(mpt,
1268 	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1269 		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1270 		goto fail;
1271 	}
1272 
1273 	/*
1274 	 * Setup configuration space
1275 	 */
1276 	if (mptsas_config_space_init(mpt) == FALSE) {
1277 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1278 		goto fail;
1279 	}
1280 	config_setup++;
1281 
1282 	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1283 	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1284 		mptsas_log(mpt, CE_WARN, "map setup failed");
1285 		goto fail;
1286 	}
1287 	map_setup++;
1288 
1289 	/*
1290 	 * A taskq is created for dealing with the event handler
1291 	 */
1292 	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1293 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1294 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1295 		goto fail;
1296 	}
1297 	event_taskq_create++;
1298 
1299 	/*
1300 	 * A taskq is created for dealing with dr events
1301 	 */
1302 	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1303 	    "mptsas_dr_taskq",
1304 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1305 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1306 		    "failed");
1307 		goto fail;
1308 	}
1309 	dr_taskq_create++;
1310 
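	/*
	 * Read the tunables that control the pool of completion ("doneq")
	 * worker threads and, if a non-zero thread count is configured,
	 * create the threads.
	 */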
1311 	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1312 	    0, "mptsas_doneq_thread_threshold_prop", 10);
1313 	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1314 	    0, "mptsas_doneq_length_threshold_prop", 8);
1315 	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1316 	    0, "mptsas_doneq_thread_n_prop", 8);
1317 
1318 	if (mpt->m_doneq_thread_n) {
1319 		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1320 		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1321 
1322 		mutex_enter(&mpt->m_doneq_mutex);
1323 		mpt->m_doneq_thread_id =
1324 		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1325 		    * mpt->m_doneq_thread_n, KM_SLEEP);
1326 
1327 		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1328 			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1329 			    CV_DRIVER, NULL);
1330 			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1331 			    MUTEX_DRIVER, NULL);
1332 			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1333 			mpt->m_doneq_thread_id[j].flag |=
1334 			    MPTSAS_DONEQ_THREAD_ACTIVE;
1335 			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1336 			mpt->m_doneq_thread_id[j].arg.t = j;
1337 			mpt->m_doneq_thread_id[j].threadp =
1338 			    thread_create(NULL, 0, mptsas_doneq_thread,
1339 			    &mpt->m_doneq_thread_id[j].arg,
1340 			    0, &p0, TS_RUN, minclsyspri);
1341 			mpt->m_doneq_thread_id[j].donetail =
1342 			    &mpt->m_doneq_thread_id[j].doneq;
1343 			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1344 		}
1345 		mutex_exit(&mpt->m_doneq_mutex);
1346 		doneq_thread_create++;
1347 	}
1348 
1349 	/*
1350 	 * Disable hardware interrupt since we're not ready to
1351 	 * handle it yet.
1352 	 */
1353 	MPTSAS_DISABLE_INTR(mpt);
1354 	if (mptsas_register_intrs(mpt) == FALSE)
1355 		goto fail;
1356 	intr_added++;
1357 
1358 	/* Initialize mutex used in interrupt handler */
1359 	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1360 	    DDI_INTR_PRI(mpt->m_intr_pri));
1361 	mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1362 	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1363 	    DDI_INTR_PRI(mpt->m_intr_pri));
1364 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1365 		mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1366 		    NULL, MUTEX_DRIVER,
1367 		    DDI_INTR_PRI(mpt->m_intr_pri));
1368 	}
1369 
1370 	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1371 	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1372 	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1373 	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1374 	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1375 	cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
1376 	mutex_init_done++;
1377 
1378 	mutex_enter(&mpt->m_mutex);
1379 	/*
1380 	 * Initialize power management component
1381 	 */
1382 	if (mpt->m_options & MPTSAS_OPT_PM) {
1383 		if (mptsas_init_pm(mpt)) {
1384 			mutex_exit(&mpt->m_mutex);
1385 			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1386 			    "failed");
1387 			goto fail;
1388 		}
1389 	}
1390 
1391 	/*
1392 	 * Initialize chip using Message Unit Reset, if allowed
1393 	 */
1394 	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1395 	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1396 		mutex_exit(&mpt->m_mutex);
1397 		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1398 		goto fail;
1399 	}
1400 
1401 	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1402 	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
1403 	    mptsas_target_free, sizeof (mptsas_target_t),
1404 	    offsetof(mptsas_target_t, m_link),
1405 	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1406 
1407 	/*
1408 	 * The refhash for temporary targets uses the address of the target
1409 	 * struct itself as tag, so the tag offset is 0. See the implementation
1410 	 * of mptsas_tmp_target_hash() and mptsas_tmp_target_cmp().
1411 	 */
1412 	mpt->m_tmp_targets = refhash_create(MPTSAS_TMP_TARGET_BUCKET_COUNT,
1413 	    mptsas_tmp_target_hash, mptsas_tmp_target_cmp,
1414 	    mptsas_target_free, sizeof (mptsas_target_t),
1415 	    offsetof(mptsas_target_t, m_link), 0, KM_SLEEP);
1416 
1417 	/*
1418 	 * Fill in the phy_info structure and get the base WWID
1419 	 */
1420 	if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1421 		mptsas_log(mpt, CE_WARN,
1422 		    "mptsas_get_manufacture_page5 failed!");
1423 		goto fail;
1424 	}
1425 
1426 	if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1427 		mptsas_log(mpt, CE_WARN,
1428 		    "mptsas_get_sas_io_unit_page_hndshk failed!");
1429 		goto fail;
1430 	}
1431 
1432 	if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1433 		mptsas_log(mpt, CE_WARN,
1434 		    "mptsas_get_manufacture_page0 failed!");
1435 		goto fail;
1436 	}
1437 
1438 	mutex_exit(&mpt->m_mutex);
1439 
1440 	/*
1441 	 * Register the iport for multiple port HBA
1442 	 */
1443 	mptsas_iport_register(mpt);
1444 
1445 	/*
1446 	 * initialize SCSI HBA transport structure
1447 	 */
1448 	if (mptsas_hba_setup(mpt) == FALSE)
1449 		goto fail;
1450 	hba_attach_setup++;
1451 
1452 	if (mptsas_smp_setup(mpt) == FALSE)
1453 		goto fail;
1454 	smp_attach_setup++;
1455 
1456 	if (mptsas_enc_setup(mpt) == FALSE)
1457 		goto fail;
1458 	enc_attach_setup++;
1459 
1460 	if (mptsas_cache_create(mpt) == FALSE)
1461 		goto fail;
1462 
1463 	mpt->m_scsi_reset_delay	= ddi_prop_get_int(DDI_DEV_T_ANY,
1464 	    dip, 0, "scsi-reset-delay",	SCSI_DEFAULT_RESET_DELAY);
1465 	if (mpt->m_scsi_reset_delay == 0) {
1466 		mptsas_log(mpt, CE_NOTE,
1467 		    "scsi_reset_delay of 0 is not recommended,"
1468 		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1469 		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1470 	}
1471 
1472 	/*
1473 	 * Initialize the wait and done FIFO queue
1474 	 */
1475 	mpt->m_donetail = &mpt->m_doneq;
1476 	mpt->m_waitqtail = &mpt->m_waitq;
1477 	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1478 	mpt->m_tx_draining = 0;
1479 
1480 	/*
1481 	 * ioc cmd queue initialize
1482 	 */
1483 	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1484 	mpt->m_dev_handle = 0xFFFF;
1485 
1486 	MPTSAS_ENABLE_INTR(mpt);
1487 
1488 	/*
1489 	 * enable event notification
1490 	 */
1491 	mutex_enter(&mpt->m_mutex);
1492 	if (mptsas_ioc_enable_event_notification(mpt)) {
1493 		mutex_exit(&mpt->m_mutex);
1494 		goto fail;
1495 	}
1496 	mutex_exit(&mpt->m_mutex);
1497 
1498 	/*
1499 	 * used for mptsas_watch
1500 	 */
1501 	mptsas_list_add(mpt);
1502 
1503 	mutex_enter(&mptsas_global_mutex);
1504 	if (mptsas_timeouts_enabled == 0) {
1505 		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1506 		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1507 
1508 		mptsas_tick = mptsas_scsi_watchdog_tick *
1509 		    drv_usectohz((clock_t)1000000);
1510 
1511 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1512 		mptsas_timeouts_enabled = 1;
1513 	}
1514 	mutex_exit(&mptsas_global_mutex);
1515 	added_watchdog++;
1516 
1517 	/*
1518 	 * Initialize PHY info for smhba.
1519 	 * This requires the watchdog to be enabled; otherwise, if interrupts
1520 	 * don't work, the system will hang.
1521 	 */
1522 	if (mptsas_smhba_setup(mpt)) {
1523 		mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1524 		    "failed");
1525 		goto fail;
1526 	}
1527 
1528 	/* Check all dma handles allocated in attach */
1529 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1530 	    != DDI_SUCCESS) ||
1531 	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
1532 	    != DDI_SUCCESS) ||
1533 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1534 	    != DDI_SUCCESS) ||
1535 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1536 	    != DDI_SUCCESS) ||
1537 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1538 	    != DDI_SUCCESS) ||
1539 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1540 	    != DDI_SUCCESS)) {
1541 		goto fail;
1542 	}
1543 
1544 	/* Check all acc handles allocated in attach */
1545 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1546 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1547 	    != DDI_SUCCESS) ||
1548 	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
1549 	    != DDI_SUCCESS) ||
1550 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1551 	    != DDI_SUCCESS) ||
1552 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1553 	    != DDI_SUCCESS) ||
1554 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1555 	    != DDI_SUCCESS) ||
1556 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1557 	    != DDI_SUCCESS) ||
1558 	    (mptsas_check_acc_handle(mpt->m_config_handle)
1559 	    != DDI_SUCCESS)) {
1560 		goto fail;
1561 	}
1562 
1563 	/*
1564 	 * After this point, we are not going to fail the attach.
1565 	 */
1566 
1567 	/* Let the UFM subsystem know we're ready to receive callbacks */
1568 	ddi_ufm_update(mpt->m_ufmh);
1569 
1570 	/* Print message of HBA present */
1571 	ddi_report_dev(dip);
1572 
1573 	/* report idle status to pm framework */
1574 	if (mpt->m_options & MPTSAS_OPT_PM) {
1575 		(void) pm_idle_component(dip, 0);
1576 	}
1577 
1578 	return (DDI_SUCCESS);
1579 
1580 fail:
1581 	mptsas_log(mpt, CE_WARN, "attach failed");
1582 	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1583 	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1584 	if (mpt) {
1585 		/* deallocate in reverse order */
1586 		if (added_watchdog) {
1587 			mptsas_list_del(mpt);
1588 			mutex_enter(&mptsas_global_mutex);
1589 
1590 			if (mptsas_timeout_id && (mptsas_head == NULL)) {
1591 				timeout_id_t tid = mptsas_timeout_id;
1592 				mptsas_timeouts_enabled = 0;
1593 				mptsas_timeout_id = 0;
1594 				mutex_exit(&mptsas_global_mutex);
1595 				(void) untimeout(tid);
1596 				mutex_enter(&mptsas_global_mutex);
1597 			}
1598 			mutex_exit(&mptsas_global_mutex);
1599 		}
1600 
1601 		mptsas_cache_destroy(mpt);
1602 
1603 		if (smp_attach_setup) {
1604 			mptsas_smp_teardown(mpt);
1605 		}
1606 		if (enc_attach_setup) {
1607 			mptsas_enc_teardown(mpt);
1608 		}
1609 		if (hba_attach_setup) {
1610 			mptsas_hba_teardown(mpt);
1611 		}
1612 
1613 		if (mpt->m_tmp_targets)
1614 			refhash_destroy(mpt->m_tmp_targets);
1615 		if (mpt->m_targets)
1616 			refhash_destroy(mpt->m_targets);
1617 		if (mpt->m_smp_targets)
1618 			refhash_destroy(mpt->m_smp_targets);
1619 
1620 		if (mpt->m_active) {
1621 			mptsas_free_active_slots(mpt);
1622 		}
1623 		if (intr_added) {
1624 			mptsas_unregister_intrs(mpt);
1625 		}
1626 
1627 		if (doneq_thread_create) {
1628 			mutex_enter(&mpt->m_doneq_mutex);
1629 			doneq_thread_num = mpt->m_doneq_thread_n;
1630 			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1631 				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1632 				mpt->m_doneq_thread_id[j].flag &=
1633 				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1634 				cv_signal(&mpt->m_doneq_thread_id[j].cv);
1635 				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1636 			}
1637 			while (mpt->m_doneq_thread_n) {
1638 				cv_wait(&mpt->m_doneq_thread_cv,
1639 				    &mpt->m_doneq_mutex);
1640 			}
1641 			for (j = 0; j < doneq_thread_num; j++) {
1642 				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1643 				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1644 			}
1645 			kmem_free(mpt->m_doneq_thread_id,
1646 			    sizeof (mptsas_doneq_thread_list_t)
1647 			    * doneq_thread_num);
1648 			mutex_exit(&mpt->m_doneq_mutex);
1649 			cv_destroy(&mpt->m_doneq_thread_cv);
1650 			mutex_destroy(&mpt->m_doneq_mutex);
1651 		}
1652 		if (event_taskq_create) {
1653 			ddi_taskq_destroy(mpt->m_event_taskq);
1654 		}
1655 		if (dr_taskq_create) {
1656 			ddi_taskq_destroy(mpt->m_dr_taskq);
1657 		}
1658 		if (mutex_init_done) {
1659 			mutex_destroy(&mpt->m_tx_waitq_mutex);
1660 			mutex_destroy(&mpt->m_passthru_mutex);
1661 			mutex_destroy(&mpt->m_mutex);
1662 			for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1663 				mutex_destroy(
1664 				    &mpt->m_phy_info[i].smhba_info.phy_mutex);
1665 			}
1666 			cv_destroy(&mpt->m_cv);
1667 			cv_destroy(&mpt->m_passthru_cv);
1668 			cv_destroy(&mpt->m_fw_cv);
1669 			cv_destroy(&mpt->m_config_cv);
1670 			cv_destroy(&mpt->m_fw_diag_cv);
1671 			cv_destroy(&mpt->m_extreq_sense_refcount_cv);
1672 		}
1673 
1674 		if (map_setup) {
1675 			mptsas_cfg_fini(mpt);
1676 		}
1677 		if (config_setup) {
1678 			mptsas_config_space_fini(mpt);
1679 		}
1680 		mptsas_free_handshake_msg(mpt);
1681 		mptsas_hba_fini(mpt);
1682 
1683 		mptsas_fm_fini(mpt);
1684 		ddi_soft_state_free(mptsas_state, instance);
1685 		ddi_prop_remove_all(dip);
1686 	}
1687 	return (DDI_FAILURE);
1688 }
1689 
1690 static int
1691 mptsas_suspend(dev_info_t *devi)
1692 {
1693 	mptsas_t	*mpt, *g;
1694 	scsi_hba_tran_t	*tran;
1695 
1696 	if (scsi_hba_iport_unit_address(devi)) {
1697 		return (DDI_SUCCESS);
1698 	}
1699 
1700 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1701 		return (DDI_SUCCESS);
1702 
1703 	mpt = TRAN2MPT(tran);
1704 	if (!mpt) {
1705 		return (DDI_SUCCESS);
1706 	}
1707 
1708 	mutex_enter(&mpt->m_mutex);
1709 
1710 	if (mpt->m_suspended++) {
1711 		mutex_exit(&mpt->m_mutex);
1712 		return (DDI_SUCCESS);
1713 	}
1714 
1715 	/*
1716 	 * Cancel timeout threads for this mpt
1717 	 */
1718 	if (mpt->m_quiesce_timeid) {
1719 		timeout_id_t tid = mpt->m_quiesce_timeid;
1720 		mpt->m_quiesce_timeid = 0;
1721 		mutex_exit(&mpt->m_mutex);
1722 		(void) untimeout(tid);
1723 		mutex_enter(&mpt->m_mutex);
1724 	}
1725 
1726 	if (mpt->m_restart_cmd_timeid) {
1727 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
1728 		mpt->m_restart_cmd_timeid = 0;
1729 		mutex_exit(&mpt->m_mutex);
1730 		(void) untimeout(tid);
1731 		mutex_enter(&mpt->m_mutex);
1732 	}
1733 
1734 	mutex_exit(&mpt->m_mutex);
1735 
1736 	(void) pm_idle_component(mpt->m_dip, 0);
1737 
1738 	/*
1739 	 * Cancel watch threads if all mpts suspended
1740 	 */
1741 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1742 	for (g = mptsas_head; g != NULL; g = g->m_next) {
1743 		if (!g->m_suspended)
1744 			break;
1745 	}
1746 	rw_exit(&mptsas_global_rwlock);
1747 
1748 	mutex_enter(&mptsas_global_mutex);
1749 	if (g == NULL) {
1750 		timeout_id_t tid;
1751 
1752 		mptsas_timeouts_enabled = 0;
1753 		if (mptsas_timeout_id) {
1754 			tid = mptsas_timeout_id;
1755 			mptsas_timeout_id = 0;
1756 			mutex_exit(&mptsas_global_mutex);
1757 			(void) untimeout(tid);
1758 			mutex_enter(&mptsas_global_mutex);
1759 		}
1760 		if (mptsas_reset_watch) {
1761 			tid = mptsas_reset_watch;
1762 			mptsas_reset_watch = 0;
1763 			mutex_exit(&mptsas_global_mutex);
1764 			(void) untimeout(tid);
1765 			mutex_enter(&mptsas_global_mutex);
1766 		}
1767 	}
1768 	mutex_exit(&mptsas_global_mutex);
1769 
1770 	mutex_enter(&mpt->m_mutex);
1771 
1772 	/*
1773 	 * If this mpt is not at full power (PM_LEVEL_D0), just return.
1774 	 */
1775 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
1776 	    (mpt->m_power_level != PM_LEVEL_D0)) {
1777 		mutex_exit(&mpt->m_mutex);
1778 		return (DDI_SUCCESS);
1779 	}
1780 
1781 	/* Disable HBA interrupts in hardware */
1782 	MPTSAS_DISABLE_INTR(mpt);
1783 	/*
1784 	 * Send RAID action system shutdown to sync IR
1785 	 */
1786 	mptsas_raid_action_system_shutdown(mpt);
1787 
1788 	mutex_exit(&mpt->m_mutex);
1789 
1790 	/* drain the taskq */
1791 	ddi_taskq_wait(mpt->m_event_taskq);
1792 	ddi_taskq_wait(mpt->m_dr_taskq);
1793 
1794 	return (DDI_SUCCESS);
1795 }
1796 
1797 #ifdef	__sparc
1798 /*ARGSUSED*/
1799 static int
1800 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1801 {
1802 	mptsas_t	*mpt;
1803 	scsi_hba_tran_t *tran;
1804 
1805 	/*
1806 	 * If this call is for an iport, just return.
1807 	 */
1808 	if (scsi_hba_iport_unit_address(devi))
1809 		return (DDI_SUCCESS);
1810 
1811 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1812 		return (DDI_SUCCESS);
1813 
1814 	if ((mpt = TRAN2MPT(tran)) == NULL)
1815 		return (DDI_SUCCESS);
1816 
1817 	/*
1818 	 * Send RAID action system shutdown to sync IR.  Disable HBA
1819 	 * interrupts in hardware first.
1820 	 */
1821 	MPTSAS_DISABLE_INTR(mpt);
1822 	mptsas_raid_action_system_shutdown(mpt);
1823 
1824 	return (DDI_SUCCESS);
1825 }
1826 #else /* __sparc */
1827 /*
1828  * quiesce(9E) entry point.
1829  *
1830  * This function is called when the system is single-threaded at high
1831  * PIL with preemption disabled. Therefore, this function must not
1832  * block.
1833  *
1834  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1835  * DDI_FAILURE indicates an error condition and should almost never happen.
1836  */
1837 static int
1838 mptsas_quiesce(dev_info_t *devi)
1839 {
1840 	mptsas_t	*mpt;
1841 	scsi_hba_tran_t *tran;
1842 
1843 	/*
1844 	 * If this call is for an iport, just return.
1845 	 */
1846 	if (scsi_hba_iport_unit_address(devi))
1847 		return (DDI_SUCCESS);
1848 
1849 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1850 		return (DDI_SUCCESS);
1851 
1852 	if ((mpt = TRAN2MPT(tran)) == NULL)
1853 		return (DDI_SUCCESS);
1854 
1855 	/* Disable HBA interrupts in hardware */
1856 	MPTSAS_DISABLE_INTR(mpt);
1857 	/* Send RAID action system shutdown to sync IR */
1858 	mptsas_raid_action_system_shutdown(mpt);
1859 
1860 	return (DDI_SUCCESS);
1861 }
1862 #endif	/* __sparc */
1863 
1864 /*
1865  * detach(9E).	Remove all device allocations and system resources;
1866  * disable device interrupts.
1867  * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1868  */
1869 static int
1870 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1871 {
1872 	/* CONSTCOND */
1873 	ASSERT(NO_COMPETING_THREADS);
1874 	NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1875 
1876 	switch (cmd) {
1877 	case DDI_DETACH:
1878 		return (mptsas_do_detach(devi));
1879 
1880 	case DDI_SUSPEND:
1881 		return (mptsas_suspend(devi));
1882 
1883 	default:
1884 		return (DDI_FAILURE);
1885 	}
1886 	/* NOTREACHED */
1887 }
1888 
1889 static int
1890 mptsas_do_detach(dev_info_t *dip)
1891 {
1892 	mptsas_t	*mpt;
1893 	scsi_hba_tran_t	*tran;
1894 	mdi_pathinfo_t	*pip = NULL;
1895 	int		i;
1896 	int		doneq_thread_num = 0;
1897 
1898 	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1899 
1900 	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1901 		return (DDI_FAILURE);
1902 
1903 	mpt = TRAN2MPT(tran);
1904 	if (!mpt) {
1905 		return (DDI_FAILURE);
1906 	}
1907 
1908 	ddi_ufm_fini(mpt->m_ufmh);
1909 
1910 	/*
1911 	 * If pathinfo children still exist, the mpt driver must not detach
1912 	 */
1913 	if (scsi_hba_iport_unit_address(dip)) {
1914 		if (mpt->m_mpxio_enable) {
1915 			/*
1916 			 * MPxIO enabled for the iport
1917 			 */
1918 			ndi_devi_enter(scsi_vhci_dip);
1919 			ndi_devi_enter(dip);
1920 			while ((pip = mdi_get_next_client_path(dip, NULL)) !=
1921 			    NULL) {
1922 				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1923 					continue;
1924 				}
1925 				ndi_devi_exit(dip);
1926 				ndi_devi_exit(scsi_vhci_dip);
1927 				NDBG12(("detach failed because of "
1928 				    "outstanding path info"));
1929 				return (DDI_FAILURE);
1930 			}
1931 			ndi_devi_exit(dip);
1932 			ndi_devi_exit(scsi_vhci_dip);
1933 			(void) mdi_phci_unregister(dip, 0);
1934 		}
1935 
1936 		ddi_prop_remove_all(dip);
1937 
1938 		return (DDI_SUCCESS);
1939 	}
1940 
1941 	/* Make sure power level is D0 before accessing registers */
1942 	if (mpt->m_options & MPTSAS_OPT_PM) {
1943 		(void) pm_busy_component(dip, 0);
1944 		if (mpt->m_power_level != PM_LEVEL_D0) {
1945 			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1946 			    DDI_SUCCESS) {
1947 				mptsas_log(mpt, CE_WARN,
1948 				    "mptsas%d: Raise power request failed.",
1949 				    mpt->m_instance);
1950 				(void) pm_idle_component(dip, 0);
1951 				return (DDI_FAILURE);
1952 			}
1953 		}
1954 	}
1955 
1956 	/*
1957 	 * Send RAID action system shutdown to sync IR.  After that, send a
1958 	 * Message Unit Reset.  Since the DMA resources will be freed after
1959 	 * this, putting the IOC in the READY state avoids HBA-initiated DMA.
1960 	 */
1961 	mutex_enter(&mpt->m_mutex);
1962 	MPTSAS_DISABLE_INTR(mpt);
1963 	mptsas_raid_action_system_shutdown(mpt);
1964 	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1965 	(void) mptsas_ioc_reset(mpt, FALSE);
1966 	mutex_exit(&mpt->m_mutex);
1967 	mptsas_rem_intrs(mpt);
1968 	ddi_taskq_destroy(mpt->m_event_taskq);
1969 	ddi_taskq_destroy(mpt->m_dr_taskq);
1970 
1971 	if (mpt->m_doneq_thread_n) {
1972 		mutex_enter(&mpt->m_doneq_mutex);
1973 		doneq_thread_num = mpt->m_doneq_thread_n;
1974 		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1975 			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1976 			mpt->m_doneq_thread_id[i].flag &=
1977 			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1978 			cv_signal(&mpt->m_doneq_thread_id[i].cv);
1979 			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1980 		}
1981 		while (mpt->m_doneq_thread_n) {
1982 			cv_wait(&mpt->m_doneq_thread_cv,
1983 			    &mpt->m_doneq_mutex);
1984 		}
1985 		for (i = 0;  i < doneq_thread_num; i++) {
1986 			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1987 			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1988 		}
1989 		kmem_free(mpt->m_doneq_thread_id,
1990 		    sizeof (mptsas_doneq_thread_list_t)
1991 		    * doneq_thread_num);
1992 		mutex_exit(&mpt->m_doneq_mutex);
1993 		cv_destroy(&mpt->m_doneq_thread_cv);
1994 		mutex_destroy(&mpt->m_doneq_mutex);
1995 	}
1996 
1997 	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1998 
1999 	mptsas_list_del(mpt);
2000 
2001 	/*
2002 	 * Cancel timeout threads for this mpt
2003 	 */
2004 	mutex_enter(&mpt->m_mutex);
2005 	if (mpt->m_quiesce_timeid) {
2006 		timeout_id_t tid = mpt->m_quiesce_timeid;
2007 		mpt->m_quiesce_timeid = 0;
2008 		mutex_exit(&mpt->m_mutex);
2009 		(void) untimeout(tid);
2010 		mutex_enter(&mpt->m_mutex);
2011 	}
2012 
2013 	if (mpt->m_restart_cmd_timeid) {
2014 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
2015 		mpt->m_restart_cmd_timeid = 0;
2016 		mutex_exit(&mpt->m_mutex);
2017 		(void) untimeout(tid);
2018 		mutex_enter(&mpt->m_mutex);
2019 	}
2020 
2021 	mutex_exit(&mpt->m_mutex);
2022 
2023 	/*
2024 	 * last mpt? ... if active, CANCEL watch threads.
2025 	 */
2026 	mutex_enter(&mptsas_global_mutex);
2027 	if (mptsas_head == NULL) {
2028 		timeout_id_t tid;
2029 		/*
2030 		 * Clear mptsas_timeouts_enabled so that the watch thread
2031 		 * gets restarted on DDI_ATTACH
2032 		 */
2033 		mptsas_timeouts_enabled = 0;
2034 		if (mptsas_timeout_id) {
2035 			tid = mptsas_timeout_id;
2036 			mptsas_timeout_id = 0;
2037 			mutex_exit(&mptsas_global_mutex);
2038 			(void) untimeout(tid);
2039 			mutex_enter(&mptsas_global_mutex);
2040 		}
2041 		if (mptsas_reset_watch) {
2042 			tid = mptsas_reset_watch;
2043 			mptsas_reset_watch = 0;
2044 			mutex_exit(&mptsas_global_mutex);
2045 			(void) untimeout(tid);
2046 			mutex_enter(&mptsas_global_mutex);
2047 		}
2048 	}
2049 	mutex_exit(&mptsas_global_mutex);
2050 
2051 	/*
2052 	 * Delete Phy stats
2053 	 */
2054 	mptsas_destroy_phy_stats(mpt);
2055 
2056 	mptsas_destroy_hashes(mpt);
2057 
2058 	/*
2059 	 * Delete m_active.
2060 	 */
2061 	mutex_enter(&mpt->m_mutex);
2062 	mptsas_free_active_slots(mpt);
2063 	mutex_exit(&mpt->m_mutex);
2064 
2065 	/* deallocate everything that was allocated in mptsas_attach */
2066 	mptsas_cache_destroy(mpt);
2067 
2068 	mptsas_hba_fini(mpt);
2069 	mptsas_cfg_fini(mpt);
2070 
2071 	/* Lower the power informing PM Framework */
2072 	if (mpt->m_options & MPTSAS_OPT_PM) {
2073 		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2074 			mptsas_log(mpt, CE_WARN,
2075 			    "!mptsas%d: Lower power request failed "
2076 			    "during detach, ignoring.",
2077 			    mpt->m_instance);
2078 	}
2079 
2080 	mutex_destroy(&mpt->m_tx_waitq_mutex);
2081 	mutex_destroy(&mpt->m_passthru_mutex);
2082 	mutex_destroy(&mpt->m_mutex);
2083 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2084 		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2085 	}
2086 	cv_destroy(&mpt->m_cv);
2087 	cv_destroy(&mpt->m_passthru_cv);
2088 	cv_destroy(&mpt->m_fw_cv);
2089 	cv_destroy(&mpt->m_config_cv);
2090 	cv_destroy(&mpt->m_fw_diag_cv);
2091 	cv_destroy(&mpt->m_extreq_sense_refcount_cv);
2092 
2093 	mptsas_smp_teardown(mpt);
2094 	mptsas_enc_teardown(mpt);
2095 	mptsas_hba_teardown(mpt);
2096 
2097 	mptsas_config_space_fini(mpt);
2098 
2099 	mptsas_free_handshake_msg(mpt);
2100 
2101 	mptsas_fm_fini(mpt);
2102 	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2103 	ddi_prop_remove_all(dip);
2104 
2105 	return (DDI_SUCCESS);
2106 }
2107 
2108 static void
2109 mptsas_list_add(mptsas_t *mpt)
2110 {
2111 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
2112 
2113 	if (mptsas_head == NULL) {
2114 		mptsas_head = mpt;
2115 	} else {
2116 		mptsas_tail->m_next = mpt;
2117 	}
2118 	mptsas_tail = mpt;
2119 	rw_exit(&mptsas_global_rwlock);
2120 }
2121 
2122 static void
2123 mptsas_list_del(mptsas_t *mpt)
2124 {
2125 	mptsas_t *m;
2126 	/*
2127 	 * Remove device instance from the global linked list
2128 	 */
2129 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
2130 	if (mptsas_head == mpt) {
2131 		m = mptsas_head = mpt->m_next;
2132 	} else {
2133 		for (m = mptsas_head; m != NULL; m = m->m_next) {
2134 			if (m->m_next == mpt) {
2135 				m->m_next = mpt->m_next;
2136 				break;
2137 			}
2138 		}
2139 		if (m == NULL) {
2140 			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2141 		}
2142 	}
2143 
2144 	if (mptsas_tail == mpt) {
2145 		mptsas_tail = m;
2146 	}
2147 	rw_exit(&mptsas_global_rwlock);
2148 }
2149 
2150 static int
2151 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2152 {
2153 	ddi_dma_attr_t	task_dma_attrs;
2154 
2155 	mpt->m_hshk_dma_size = 0;
2156 	task_dma_attrs = mpt->m_msg_dma_attr;
2157 	task_dma_attrs.dma_attr_sgllen = 1;
2158 	task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2159 
2160 	/* allocate Task Management ddi_dma resources */
2161 	if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2162 	    &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2163 	    alloc_size, NULL) == FALSE) {
2164 		return (DDI_FAILURE);
2165 	}
2166 	mpt->m_hshk_dma_size = alloc_size;
2167 
2168 	return (DDI_SUCCESS);
2169 }
2170 
2171 static void
2172 mptsas_free_handshake_msg(mptsas_t *mpt)
2173 {
2174 	if (mpt->m_hshk_dma_size == 0)
2175 		return;
2176 	mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2177 	mpt->m_hshk_dma_size = 0;
2178 }
2179 
2180 static int
2181 mptsas_hba_setup(mptsas_t *mpt)
2182 {
2183 	scsi_hba_tran_t		*hba_tran;
2184 	int			tran_flags;
2185 
2186 	/* Allocate a transport structure */
2187 	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
2188 	    SCSI_HBA_CANSLEEP);
2189 	ASSERT(mpt->m_tran != NULL);
2190 
2191 	hba_tran->tran_hba_private	= mpt;
2192 	hba_tran->tran_tgt_private	= NULL;
2193 
2194 	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
2195 	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;
2196 
2197 	hba_tran->tran_start		= mptsas_scsi_start;
2198 	hba_tran->tran_reset		= mptsas_scsi_reset;
2199 	hba_tran->tran_abort		= mptsas_scsi_abort;
2200 	hba_tran->tran_getcap		= mptsas_scsi_getcap;
2201 	hba_tran->tran_setcap		= mptsas_scsi_setcap;
2202 	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
2203 	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;
2204 
2205 	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
2206 	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
2207 	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;
2208 
2209 	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
2210 	hba_tran->tran_get_name		= mptsas_get_name;
2211 
2212 	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
2213 	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
2214 	hba_tran->tran_bus_reset	= NULL;
2215 
2216 	hba_tran->tran_add_eventcall	= NULL;
2217 	hba_tran->tran_get_eventcookie	= NULL;
2218 	hba_tran->tran_post_event	= NULL;
2219 	hba_tran->tran_remove_eventcall	= NULL;
2220 
2221 	hba_tran->tran_bus_config	= mptsas_bus_config;
2222 
2223 	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2224 
2225 	/*
2226 	 * All children of the HBA are iports.  We need the tran to be
2227 	 * cloned, so we pass SCSI_HBA_TRAN_CLONE to SCSA; the flag is
2228 	 * inherited by each iport's tran vector.
2229 	 */
2230 	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2231 
2232 	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2233 	    hba_tran, tran_flags) != DDI_SUCCESS) {
2234 		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2235 		scsi_hba_tran_free(hba_tran);
2236 		mpt->m_tran = NULL;
2237 		return (FALSE);
2238 	}
2239 	return (TRUE);
2240 }
2241 
2242 static void
2243 mptsas_hba_teardown(mptsas_t *mpt)
2244 {
2245 	(void) scsi_hba_detach(mpt->m_dip);
2246 	if (mpt->m_tran != NULL) {
2247 		scsi_hba_tran_free(mpt->m_tran);
2248 		mpt->m_tran = NULL;
2249 	}
2250 }
2251 
2252 static void
2253 mptsas_iport_register(mptsas_t *mpt)
2254 {
2255 	int i, j;
2256 	mptsas_phymask_t	mask = 0x0;
2257 	/*
2258 	 * initial value of mask is 0
2259 	 */
2260 	mutex_enter(&mpt->m_mutex);
2261 	for (i = 0; i < mpt->m_num_phys; i++) {
2262 		mptsas_phymask_t phy_mask = 0x0;
2263 		char phy_mask_name[MPTSAS_MAX_PHYS];
2264 		uint8_t current_port;
2265 
2266 		if (mpt->m_phy_info[i].attached_devhdl == 0)
2267 			continue;
2268 
2269 		bzero(phy_mask_name, sizeof (phy_mask_name));
2270 
2271 		current_port = mpt->m_phy_info[i].port_num;
2272 
2273 		if ((mask & (1 << i)) != 0)
2274 			continue;
2275 
2276 		for (j = 0; j < mpt->m_num_phys; j++) {
2277 			if (mpt->m_phy_info[j].attached_devhdl &&
2278 			    (mpt->m_phy_info[j].port_num == current_port)) {
2279 				phy_mask |= (1 << j);
2280 			}
2281 		}
2282 		mask = mask | phy_mask;
2283 
2284 		for (j = 0; j < mpt->m_num_phys; j++) {
2285 			if ((phy_mask >> j) & 0x01) {
2286 				mpt->m_phy_info[j].phy_mask = phy_mask;
2287 			}
2288 		}
2289 
2290 		(void) sprintf(phy_mask_name, "%x", phy_mask);
2291 
2292 		mutex_exit(&mpt->m_mutex);
2293 		/*
2294 		 * register an iport
2295 		 */
2296 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
2297 		mutex_enter(&mpt->m_mutex);
2298 	}
2299 	mutex_exit(&mpt->m_mutex);
2300 	/*
2301 	 * always register a virtual port for RAID volumes
2302 	 */
2303 	(void) scsi_hba_iport_register(mpt->m_dip, "v0");
2304 
2305 }
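/*
 * Illustrative sketch (not driver code) of how the loop above groups phys
 * into iports.  Assume a hypothetical 4-phy HBA where phys 0-3 all have an
 * attached device and all report port_num 0:
 *
 *	phy_mask = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3);	 i.e. 0xf
 *	(void) sprintf(phy_mask_name, "%x", phy_mask);		 i.e. "f"
 *	(void) scsi_hba_iport_register(mpt->m_dip, "f");
 *
 * A separate narrow port consisting only of phy 4 would be registered as
 * iport "10", since (1 << 4) == 0x10.
 */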
2306 
2307 static int
2308 mptsas_smp_setup(mptsas_t *mpt)
2309 {
2310 	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2311 	ASSERT(mpt->m_smptran != NULL);
2312 	mpt->m_smptran->smp_tran_hba_private = mpt;
2313 	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2314 	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2315 		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2316 		smp_hba_tran_free(mpt->m_smptran);
2317 		mpt->m_smptran = NULL;
2318 		return (FALSE);
2319 	}
2320 	/*
2321 	 * Initialize smp hash table
2322 	 */
2323 	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
2324 	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
2325 	    mptsas_smp_free, sizeof (mptsas_smp_t),
2326 	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
2327 	    KM_SLEEP);
2328 	mpt->m_smp_devhdl = 0xFFFF;
2329 
2330 	return (TRUE);
2331 }
2332 
2333 static void
2334 mptsas_smp_teardown(mptsas_t *mpt)
2335 {
2336 	(void) smp_hba_detach(mpt->m_dip);
2337 	if (mpt->m_smptran != NULL) {
2338 		smp_hba_tran_free(mpt->m_smptran);
2339 		mpt->m_smptran = NULL;
2340 	}
2341 	mpt->m_smp_devhdl = 0;
2342 }
2343 
2344 static int
2345 mptsas_enc_setup(mptsas_t *mpt)
2346 {
2347 	list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
2348 	    offsetof(mptsas_enclosure_t, me_link));
2349 	return (TRUE);
2350 }
2351 
2352 static void
2353 mptsas_enc_free(mptsas_enclosure_t *mep)
2354 {
2355 	if (mep == NULL)
2356 		return;
2357 	if (mep->me_slotleds != NULL) {
2358 		VERIFY3U(mep->me_nslots, >, 0);
2359 		kmem_free(mep->me_slotleds, sizeof (uint8_t) * mep->me_nslots);
2360 	}
2361 	kmem_free(mep, sizeof (mptsas_enclosure_t));
2362 }
2363 
2364 static void
2365 mptsas_enc_teardown(mptsas_t *mpt)
2366 {
2367 	mptsas_enclosure_t *mep;
2368 
2369 	while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2370 		mptsas_enc_free(mep);
2371 	}
2372 	list_destroy(&mpt->m_enclosures);
2373 }
2374 
2375 static mptsas_enclosure_t *
2376 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2377 {
2378 	mptsas_enclosure_t *mep;
2379 
2380 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
2381 
2382 	for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2383 	    mep = list_next(&mpt->m_enclosures, mep)) {
2384 		if (hdl == mep->me_enchdl) {
2385 			return (mep);
2386 		}
2387 	}
2388 
2389 	return (NULL);
2390 }
2391 
2392 static int
2393 mptsas_cache_create(mptsas_t *mpt)
2394 {
2395 	int instance = mpt->m_instance;
2396 	char buf[64];
2397 
2398 	/*
2399 	 * create kmem cache for packets
2400 	 */
2401 	(void) sprintf(buf, "mptsas%d_cache", instance);
2402 	mpt->m_kmem_cache = kmem_cache_create(buf,
2403 	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2404 	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2405 	    NULL, (void *)mpt, NULL, 0);
2406 
2407 	if (mpt->m_kmem_cache == NULL) {
2408 		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2409 		return (FALSE);
2410 	}
2411 
2412 	/*
2413 	 * create kmem cache for extra SGL frames in case the SGL cannot
2414 	 * be accommodated in the main request frame.
2415 	 */
2416 	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
2417 	mpt->m_cache_frames = kmem_cache_create(buf,
2418 	    sizeof (mptsas_cache_frames_t), 8,
2419 	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2420 	    NULL, (void *)mpt, NULL, 0);
2421 
2422 	if (mpt->m_cache_frames == NULL) {
2423 		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2424 		return (FALSE);
2425 	}
2426 
2427 	return (TRUE);
2428 }
2429 
2430 static void
2431 mptsas_cache_destroy(mptsas_t *mpt)
2432 {
2433 	/* deallocate in reverse order */
2434 	if (mpt->m_cache_frames) {
2435 		kmem_cache_destroy(mpt->m_cache_frames);
2436 		mpt->m_cache_frames = NULL;
2437 	}
2438 	if (mpt->m_kmem_cache) {
2439 		kmem_cache_destroy(mpt->m_kmem_cache);
2440 		mpt->m_kmem_cache = NULL;
2441 	}
2442 }
2443 
2444 static int
2445 mptsas_power(dev_info_t *dip, int component, int level)
2446 {
2447 #ifndef __lock_lint
2448 	_NOTE(ARGUNUSED(component))
2449 #endif
2450 	mptsas_t	*mpt;
2451 	int		rval = DDI_SUCCESS;
2452 	int		polls = 0;
2453 	uint32_t	ioc_status;
2454 
2455 	if (scsi_hba_iport_unit_address(dip) != 0)
2456 		return (DDI_SUCCESS);
2457 
2458 	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
2459 	if (mpt == NULL) {
2460 		return (DDI_FAILURE);
2461 	}
2462 
2463 	mutex_enter(&mpt->m_mutex);
2464 
2465 	/*
2466 	 * If the device is busy, don't lower its power level
2467 	 */
2468 	if (mpt->m_busy && (mpt->m_power_level > level)) {
2469 		mutex_exit(&mpt->m_mutex);
2470 		return (DDI_FAILURE);
2471 	}
2472 	switch (level) {
2473 	case PM_LEVEL_D0:
2474 		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2475 		MPTSAS_POWER_ON(mpt);
2476 		/*
2477 		 * Wait up to 30 seconds for IOC to come out of reset.
2478 		 */
2479 		while (((ioc_status = ddi_get32(mpt->m_datap,
2480 		    &mpt->m_reg->Doorbell)) &
2481 		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2482 			if (polls++ > 3000) {
2483 				break;
2484 			}
2485 			delay(drv_usectohz(10000));
2486 		}
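		/*
		 * Timing sketch (worked arithmetic, for illustration): each
		 * iteration above delays drv_usectohz(10000), i.e. 10 ms, and
		 * the loop gives up after 3000 polls, which is the roughly
		 * 30 second budget mentioned above.
		 */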
2487 		/*
2488 		 * If IOC is not in operational state, try to hard reset it.
2489 		 */
2490 		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2491 		    MPI2_IOC_STATE_OPERATIONAL) {
2492 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2493 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2494 				mptsas_log(mpt, CE_WARN,
2495 				    "mptsas_power: hard reset failed");
2496 				mutex_exit(&mpt->m_mutex);
2497 				return (DDI_FAILURE);
2498 			}
2499 		}
2500 		mpt->m_power_level = PM_LEVEL_D0;
2501 		break;
2502 	case PM_LEVEL_D3:
2503 		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2504 		MPTSAS_POWER_OFF(mpt);
2505 		break;
2506 	default:
2507 		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2508 		    mpt->m_instance, level);
2509 		rval = DDI_FAILURE;
2510 		break;
2511 	}
2512 	mutex_exit(&mpt->m_mutex);
2513 	return (rval);
2514 }
2515 
2516 /*
2517  * Initialize configuration space and figure out which
2518  * chip and revision of the chip the mpt driver is using.
2519  */
2520 static int
2521 mptsas_config_space_init(mptsas_t *mpt)
2522 {
2523 	NDBG0(("mptsas_config_space_init"));
2524 
2525 	if (mpt->m_config_handle != NULL)
2526 		return (TRUE);
2527 
2528 	if (pci_config_setup(mpt->m_dip,
2529 	    &mpt->m_config_handle) != DDI_SUCCESS) {
2530 		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2531 		return (FALSE);
2532 	}
2533 
2534 	/*
2535 	 * This is a workaround for an XMITS ASIC bug which does not
2536 	 * drive the CBE upper bits.
2537 	 */
2538 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2539 	    PCI_STAT_PERROR) {
2540 		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2541 		    PCI_STAT_PERROR);
2542 	}
2543 
2544 	mptsas_setup_cmd_reg(mpt);
2545 
2546 	/*
2547 	 * Get the chip device id:
2548 	 */
2549 	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2550 
2551 	/*
2552 	 * Save the revision.
2553 	 */
2554 	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2555 
2556 	/*
2557 	 * Save the SubSystem Vendor and Device IDs
2558 	 */
2559 	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2560 	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2561 
2562 	/*
2563 	 * Set the latency timer to 0x40 as specified by the upa -> pci
2564 	 * bridge chip design team.  This may be done by the sparc pci
2565 	 * bus nexus driver, but the driver should make sure the latency
2566 	 * timer is correct for performance reasons.
2567 	 */
2568 	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2569 	    MPTSAS_LATENCY_TIMER);
2570 
2571 	(void) mptsas_get_pci_cap(mpt);
2572 	return (TRUE);
2573 }
2574 
2575 static void
2576 mptsas_config_space_fini(mptsas_t *mpt)
2577 {
2578 	if (mpt->m_config_handle != NULL) {
2579 		mptsas_disable_bus_master(mpt);
2580 		pci_config_teardown(&mpt->m_config_handle);
2581 		mpt->m_config_handle = NULL;
2582 	}
2583 }
2584 
2585 static void
2586 mptsas_setup_cmd_reg(mptsas_t *mpt)
2587 {
2588 	ushort_t	cmdreg;
2589 
2590 	/*
2591 	 * Set the command register to the needed values.
2592 	 */
2593 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2594 	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2595 	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2596 	cmdreg &= ~PCI_COMM_IO;
2597 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2598 }
2599 
2600 static void
2601 mptsas_disable_bus_master(mptsas_t *mpt)
2602 {
2603 	ushort_t	cmdreg;
2604 
2605 	/*
2606 	 * Clear the master enable bit in the PCI command register.
2607 	 * This prevents any bus mastering activity like DMA.
2608 	 */
2609 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2610 	cmdreg &= ~PCI_COMM_ME;
2611 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2612 }
2613 
2614 int
2615 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2616 {
2617 	ddi_dma_attr_t	attrs;
2618 
2619 	attrs = mpt->m_io_dma_attr;
2620 	attrs.dma_attr_sgllen = 1;
2621 
2622 	ASSERT(dma_statep != NULL);
2623 
2624 	if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2625 	    &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2626 	    &dma_statep->cookie) == FALSE) {
2627 		return (DDI_FAILURE);
2628 	}
2629 
2630 	return (DDI_SUCCESS);
2631 }
2632 
2633 void
2634 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2635 {
2636 	ASSERT(dma_statep != NULL);
2637 	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2638 	dma_statep->size = 0;
2639 }
2640 
2641 int
2642 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2643 {
2644 	ddi_dma_attr_t		attrs;
2645 	ddi_dma_handle_t	dma_handle;
2646 	caddr_t			memp;
2647 	ddi_acc_handle_t	accessp;
2648 	int			rval;
2649 
2650 	ASSERT(mutex_owned(&mpt->m_mutex));
2651 
2652 	attrs = mpt->m_msg_dma_attr;
2653 	attrs.dma_attr_sgllen = 1;
2654 	attrs.dma_attr_granular = size;
2655 
2656 	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2657 	    &accessp, &memp, size, NULL) == FALSE) {
2658 		return (DDI_FAILURE);
2659 	}
2660 
2661 	rval = (*callback) (mpt, memp, var, accessp);
2662 
2663 	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2664 	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2665 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2666 		rval = DDI_FAILURE;
2667 	}
2668 
2669 	mptsas_dma_addr_destroy(&dma_handle, &accessp);
2670 	return (rval);
2671 
2672 }
2673 
2674 static int
2675 mptsas_alloc_request_frames(mptsas_t *mpt)
2676 {
2677 	ddi_dma_attr_t		frame_dma_attrs;
2678 	caddr_t			memp;
2679 	ddi_dma_cookie_t	cookie;
2680 	size_t			mem_size;
2681 
2682 	/*
2683 	 * if already allocated, free it so it can be re-allocated
2684 	 */
2685 	if (mpt->m_dma_req_frame_hdl)
2686 		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2687 		    &mpt->m_acc_req_frame_hdl);
2688 
2689 	/*
2690 	 * The size of the request frame pool is:
2691 	 *   Number of Request Frames * Request Frame Size
2692 	 */
2693 	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
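	/*
	 * Worked example (hypothetical values, for illustration only): with
	 * m_max_requests == 1024 and m_req_frame_size == 128 bytes, the pool
	 * is 1024 * 128 == 128 KiB.  The frame for a given SMID would then
	 * start at m_req_frame_dma_addr + (SMID * m_req_frame_size).
	 */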
2694 
2695 	/*
2696 	 * set the DMA attributes.  System Request Message Frames must be
2697 	 * aligned on a 16-byte boundary.
2698 	 */
2699 	frame_dma_attrs = mpt->m_msg_dma_attr;
2700 	frame_dma_attrs.dma_attr_align = 16;
2701 	frame_dma_attrs.dma_attr_sgllen = 1;
2702 
2703 	/*
2704 	 * allocate the request frame pool.
2705 	 */
2706 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2707 	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2708 	    mem_size, &cookie) == FALSE) {
2709 		return (DDI_FAILURE);
2710 	}
2711 
2712 	/*
2713 	 * Store the request frame memory address.  This chip uses this
2714 	 * address to dma to and from the driver's frame.  The second
2715 	 * address is the address mpt uses to fill in the frame.
2716 	 */
2717 	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2718 	mpt->m_req_frame = memp;
2719 
2720 	/*
2721 	 * Clear the request frame pool.
2722 	 */
2723 	bzero(mpt->m_req_frame, mem_size);
2724 
2725 	return (DDI_SUCCESS);
2726 }
2727 
2728 static int
2729 mptsas_alloc_sense_bufs(mptsas_t *mpt)
2730 {
2731 	ddi_dma_attr_t		sense_dma_attrs;
2732 	caddr_t			memp;
2733 	ddi_dma_cookie_t	cookie;
2734 	size_t			mem_size;
2735 	int			num_extrqsense_bufs;
2736 
2737 	ASSERT(mpt->m_extreq_sense_refcount == 0);
2738 
2739 	/*
2740 	 * if already allocated, free it so it can be re-allocated
2741 	 */
2742 	if (mpt->m_dma_req_sense_hdl) {
2743 		rmfreemap(mpt->m_erqsense_map);
2744 		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
2745 		    &mpt->m_acc_req_sense_hdl);
2746 	}
2747 
2748 	/*
2749 	 * The size of the request sense pool is:
2750 	 *   (Number of Request Frames - 2 ) * Request Sense Size +
2751 	 *   extra memory for extended sense requests.
2752 	 */
2753 	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
2754 	    mptsas_extreq_sense_bufsize;
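	/*
	 * Worked example (hypothetical values): with m_max_requests == 1024,
	 * m_req_sense_size == 64 and mptsas_extreq_sense_bufsize == 16384,
	 * mem_size is (1022 * 64) + 16384 bytes; the trailing 16 KiB backs
	 * the extended request-sense buffers carved out below via rmalloc().
	 */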
2755 
2756 	/*
2757 	 * set the DMA attributes.  ARQ buffers must be
2758 	 * aligned on a 16-byte boundary.
2759 	 */
2760 	sense_dma_attrs = mpt->m_msg_dma_attr;
2761 	sense_dma_attrs.dma_attr_align = 16;
2762 	sense_dma_attrs.dma_attr_sgllen = 1;
2763 
2764 	/*
2765 	 * allocate the request sense buffer pool.
2766 	 */
2767 	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
2768 	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
2769 	    mem_size, &cookie) == FALSE) {
2770 		return (DDI_FAILURE);
2771 	}
2772 
2773 	/*
2774 	 * Store the request sense base memory address.  This chip uses this
2775 	 * address to dma the request sense data.  The second
2776 	 * address is the address mpt uses to access the data.
2777 	 * The third is the base for the extended rqsense buffers.
2778 	 */
2779 	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
2780 	mpt->m_req_sense = memp;
2781 	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
2782 	mpt->m_extreq_sense = memp;
2783 
2784 	/*
2785 	 * The extra memory is divided up into multiples of the base
2786 	 * buffer size in order to allocate via rmalloc().
2787 	 * Note that the rmallocmap cannot start at zero!
2788 	 */
2789 	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
2790 	    mpt->m_req_sense_size;
2791 	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
2792 	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);
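	/*
	 * Hypothetical sketch (not the driver's actual allocation path) of
	 * how a chunk is later carved out of this map.  rmalloc() returns a
	 * 1-based index, since a resource map cannot start at zero:
	 *
	 *	ulong_t idx = rmalloc(mpt->m_erqsense_map, 1);
	 *	if (idx != 0) {
	 *		caddr_t buf = mpt->m_extreq_sense +
	 *		    (idx - 1) * mpt->m_req_sense_size;
	 *		...
	 *		rmfree(mpt->m_erqsense_map, 1, idx);
	 *	}
	 */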
2793 
2794 	/*
2795 	 * Clear the pool.
2796 	 */
2797 	bzero(mpt->m_req_sense, mem_size);
2798 
2799 	return (DDI_SUCCESS);
2800 }
2801 
2802 static int
2803 mptsas_alloc_reply_frames(mptsas_t *mpt)
2804 {
2805 	ddi_dma_attr_t		frame_dma_attrs;
2806 	caddr_t			memp;
2807 	ddi_dma_cookie_t	cookie;
2808 	size_t			mem_size;
2809 
2810 	/*
2811 	 * if already allocated, free it so it can be re-allocated
2812 	 */
2813 	if (mpt->m_dma_reply_frame_hdl) {
2814 		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2815 		    &mpt->m_acc_reply_frame_hdl);
2816 	}
2817 
2818 	/*
2819 	 * The size of the reply frame pool is:
2820 	 *   Number of Reply Frames * Reply Frame Size
2821 	 */
2822 	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2823 
2824 	/*
2825 	 * set the DMA attributes.   System Reply Message Frames must be
2826 	 * aligned on a 4-byte boundary.  This is the default.
2827 	 */
2828 	frame_dma_attrs = mpt->m_msg_dma_attr;
2829 	frame_dma_attrs.dma_attr_sgllen = 1;
2830 
2831 	/*
2832 	 * allocate the reply frame pool
2833 	 */
2834 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2835 	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2836 	    mem_size, &cookie) == FALSE) {
2837 		return (DDI_FAILURE);
2838 	}
2839 
2840 	/*
2841 	 * Store the reply frame memory address.  This chip uses this
2842 	 * address to dma to and from the driver's frame.  The second
2843 	 * address is the address mpt uses to process the frame.
2844 	 */
2845 	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2846 	mpt->m_reply_frame = memp;
2847 
2848 	/*
2849 	 * Clear the reply frame pool.
2850 	 */
2851 	bzero(mpt->m_reply_frame, mem_size);
2852 
2853 	return (DDI_SUCCESS);
2854 }
2855 
2856 static int
2857 mptsas_alloc_free_queue(mptsas_t *mpt)
2858 {
2859 	ddi_dma_attr_t		frame_dma_attrs;
2860 	caddr_t			memp;
2861 	ddi_dma_cookie_t	cookie;
2862 	size_t			mem_size;
2863 
2864 	/*
2865 	 * if already allocated, free it so it can be re-allocated
2866 	 */
2867 	if (mpt->m_dma_free_queue_hdl) {
2868 		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2869 		    &mpt->m_acc_free_queue_hdl);
2870 	}
2871 
2872 	/*
2873 	 * The reply free queue size is:
2874 	 *   Reply Free Queue Depth * 4
2875 	 * The "4" is the size of one 32 bit address (low part of 64-bit
2876 	 *   address)
2877 	 */
2878 	mem_size = mpt->m_free_queue_depth * 4;
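	/*
	 * Worked example (hypothetical depth): with m_free_queue_depth == 256
	 * the pool is 256 * 4 == 1 KiB.  Each entry holds the low 32 bits of
	 * a reply frame address being handed back to the IOC for reuse.
	 */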
2879 
2880 	/*
2881 	 * set the DMA attributes.  The Reply Free Queue must be aligned on a
2882 	 * 16-byte boundary.
2883 	 */
2884 	frame_dma_attrs = mpt->m_msg_dma_attr;
2885 	frame_dma_attrs.dma_attr_align = 16;
2886 	frame_dma_attrs.dma_attr_sgllen = 1;
2887 
2888 	/*
2889 	 * allocate the reply free queue
2890 	 */
2891 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2892 	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2893 	    mem_size, &cookie) == FALSE) {
2894 		return (DDI_FAILURE);
2895 	}
2896 
2897 	/*
2898 	 * Store the reply free queue memory address.  This chip uses this
2899 	 * address to read from the reply free queue.  The second address
2900 	 * is the address mpt uses to manage the queue.
2901 	 */
2902 	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2903 	mpt->m_free_queue = memp;
2904 
2905 	/*
2906 	 * Clear the reply free queue memory.
2907 	 */
2908 	bzero(mpt->m_free_queue, mem_size);
2909 
2910 	return (DDI_SUCCESS);
2911 }
2912 
2913 static int
2914 mptsas_alloc_post_queue(mptsas_t *mpt)
2915 {
2916 	ddi_dma_attr_t		frame_dma_attrs;
2917 	caddr_t			memp;
2918 	ddi_dma_cookie_t	cookie;
2919 	size_t			mem_size;
2920 
2921 	/*
2922 	 * if already allocated, free it so it can be re-allocated
2923 	 */
2924 	if (mpt->m_dma_post_queue_hdl) {
2925 		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2926 		    &mpt->m_acc_post_queue_hdl);
2927 	}
2928 
2929 	/*
2930 	 * The reply descriptor post queue size is:
2931 	 *   Reply Descriptor Post Queue Depth * 8
2932 	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2933 	 */
2934 	mem_size = mpt->m_post_queue_depth * 8;
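	/*
	 * Worked example (hypothetical depth): with m_post_queue_depth == 512
	 * the pool is 512 * 8 == 4 KiB, i.e. one 64-bit reply descriptor per
	 * queue entry, written by the IOC and consumed by the driver.
	 */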
2935 
2936 	/*
2937 	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
2938 	 * aligned on a 16-byte boundary.
2939 	 */
2940 	frame_dma_attrs = mpt->m_msg_dma_attr;
2941 	frame_dma_attrs.dma_attr_align = 16;
2942 	frame_dma_attrs.dma_attr_sgllen = 1;
2943 
2944 	/*
2945 	 * allocate the reply post queue
2946 	 */
2947 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2948 	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2949 	    mem_size, &cookie) == FALSE) {
2950 		return (DDI_FAILURE);
2951 	}
2952 
2953 	/*
2954 	 * Store the reply descriptor post queue memory address.  This chip
2955 	 * uses this address to write to the reply descriptor post queue.  The
2956 	 * second address is the address mpt uses to manage the queue.
2957 	 */
2958 	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2959 	mpt->m_post_queue = memp;
2960 
2961 	/*
2962 	 * Clear the reply post queue memory.
2963 	 */
2964 	bzero(mpt->m_post_queue, mem_size);
2965 
2966 	return (DDI_SUCCESS);
2967 }
2968 
2969 static void
2970 mptsas_alloc_reply_args(mptsas_t *mpt)
2971 {
2972 	if (mpt->m_replyh_args == NULL) {
2973 		mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2974 		    mpt->m_max_replies, KM_SLEEP);
2975 	}
2976 }
2977 
2978 static int
2979 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2980 {
2981 	mptsas_cache_frames_t	*frames = NULL;
2982 	if (cmd->cmd_extra_frames == NULL) {
2983 		frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2984 		if (frames == NULL) {
2985 			return (DDI_FAILURE);
2986 		}
2987 		cmd->cmd_extra_frames = frames;
2988 	}
2989 	return (DDI_SUCCESS);
2990 }
2991 
2992 static void
2993 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2994 {
2995 	if (cmd->cmd_extra_frames) {
2996 		kmem_cache_free(mpt->m_cache_frames,
2997 		    (void *)cmd->cmd_extra_frames);
2998 		cmd->cmd_extra_frames = NULL;
2999 	}
3000 }
3001 
3002 static void
3003 mptsas_cfg_fini(mptsas_t *mpt)
3004 {
3005 	NDBG0(("mptsas_cfg_fini"));
3006 	ddi_regs_map_free(&mpt->m_datap);
3007 }
3008 
3009 static void
3010 mptsas_hba_fini(mptsas_t *mpt)
3011 {
3012 	NDBG0(("mptsas_hba_fini"));
3013 
3014 	/*
3015 	 * Free up any allocated memory
3016 	 */
3017 	if (mpt->m_dma_req_frame_hdl) {
3018 		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
3019 		    &mpt->m_acc_req_frame_hdl);
3020 	}
3021 
3022 	if (mpt->m_dma_req_sense_hdl) {
3023 		rmfreemap(mpt->m_erqsense_map);
3024 		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
3025 		    &mpt->m_acc_req_sense_hdl);
3026 	}
3027 
3028 	if (mpt->m_dma_reply_frame_hdl) {
3029 		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
3030 		    &mpt->m_acc_reply_frame_hdl);
3031 	}
3032 
3033 	if (mpt->m_dma_free_queue_hdl) {
3034 		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
3035 		    &mpt->m_acc_free_queue_hdl);
3036 	}
3037 
3038 	if (mpt->m_dma_post_queue_hdl) {
3039 		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
3040 		    &mpt->m_acc_post_queue_hdl);
3041 	}
3042 
3043 	if (mpt->m_replyh_args != NULL) {
3044 		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
3045 		    * mpt->m_max_replies);
3046 	}
3047 }
3048 
3049 static int
3050 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
3051 {
3052 	int		lun = 0;
3053 	char		*sas_wwn = NULL;
3054 	int		phynum = -1;
3055 	int		reallen = 0;
3056 
3057 	/* Get the LUN number */
3058 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
3059 	    LUN_PROP, 0);
3060 
3061 	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3062 	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3063 		/*
3064 		 * Stick in the address of the form "pPHY,LUN"
3065 		 */
3066 		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3067 	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3068 	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3069 	    == DDI_PROP_SUCCESS) {
3070 		/*
3071 		 * Stick in the address of the form "wWWN,LUN"
3072 		 */
3073 		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3074 		ddi_prop_free(sas_wwn);
3075 	} else {
3076 		return (DDI_FAILURE);
3077 	}
3078 
3079 	ASSERT(reallen < len);
3080 	if (reallen >= len) {
3081 		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
3082 		    "length too small, it needs to be %d bytes", reallen + 1);
3083 	}
3084 	return (DDI_SUCCESS);
3085 }
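/*
 * Illustrative examples of the unit addresses produced above (hypothetical
 * property values): a device behind SATA phy 2 at LUN 0 yields "p2,0", while
 * a target-port property of "w5000c50012345678" at LUN 0 yields
 * "w5000c50012345678,0".
 */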
3086 
3087 /*
3088  * tran_tgt_init(9E) - target device instance initialization
3089  */
3090 static int
3091 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3092     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3093 {
3094 #ifndef __lock_lint
3095 	_NOTE(ARGUNUSED(hba_tran))
3096 #endif
3097 
3098 	/*
3099 	 * At this point, the scsi_device structure already exists
3100 	 * and has been initialized.
3101 	 *
3102 	 * Use this function to allocate target-private data structures,
3103 	 * if needed by this HBA.  Add revised flow-control and queue
3104 	 * properties for child here, if desired and if you can tell they
3105 	 * support tagged queueing by now.
3106 	 */
3107 	mptsas_t		*mpt;
3108 	int			lun = sd->sd_address.a_lun;
3109 	mdi_pathinfo_t		*pip = NULL;
3110 	mptsas_tgt_private_t	*tgt_private = NULL;
3111 	mptsas_target_t		*ptgt = NULL;
3112 	char			*psas_wwn = NULL;
3113 	mptsas_phymask_t	phymask = 0;
3114 	uint64_t		sas_wwn = 0;
3115 	mptsas_target_addr_t	addr;
3116 	mpt = SDEV2MPT(sd);
3117 
3118 	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
3119 
3120 	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
3121 	    (void *)hba_dip, (void *)tgt_dip, lun));
3122 
3123 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3124 		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
3125 		ddi_set_name_addr(tgt_dip, NULL);
3126 		return (DDI_FAILURE);
3127 	}
3128 	/*
3129 	 * a phymask of 0 means the virtual port for RAID volumes
3130 	 */
3131 	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3132 	    "phymask", 0);
3133 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3134 		if ((pip = (void *)(sd->sd_private)) == NULL) {
3135 			/*
3136 			 * Very bad news if this occurs. Somehow scsi_vhci has
3137 			 * lost the pathinfo node for this target.
3138 			 */
3139 			return (DDI_NOT_WELL_FORMED);
3140 		}
3141 
3142 		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3143 		    DDI_PROP_SUCCESS) {
3144 			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
3145 			return (DDI_FAILURE);
3146 		}
3147 
3148 		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3149 		    &psas_wwn) == MDI_SUCCESS) {
3150 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3151 				sas_wwn = 0;
3152 			}
3153 			(void) mdi_prop_free(psas_wwn);
3154 		}
3155 	} else {
3156 		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3157 		    DDI_PROP_DONTPASS, LUN_PROP, 0);
3158 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3159 		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3160 		    DDI_PROP_SUCCESS) {
3161 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3162 				sas_wwn = 0;
3163 			}
3164 			ddi_prop_free(psas_wwn);
3165 		} else {
3166 			sas_wwn = 0;
3167 		}
3168 	}
3169 
3170 	ASSERT((sas_wwn != 0) || (phymask != 0));
3171 	addr.mta_wwn = sas_wwn;
3172 	addr.mta_phymask = phymask;
3173 	mutex_enter(&mpt->m_mutex);
3174 	ptgt = refhash_lookup(mpt->m_targets, &addr);
3175 	mutex_exit(&mpt->m_mutex);
3176 	if (ptgt == NULL) {
3177 		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
3178 		    "is gone already! phymask:%x, saswwn %"PRIx64, phymask,
3179 		    sas_wwn);
3180 		return (DDI_FAILURE);
3181 	}
3182 	if (hba_tran->tran_tgt_private == NULL) {
3183 		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3184 		    KM_SLEEP);
3185 		tgt_private->t_lun = lun;
3186 		tgt_private->t_private = ptgt;
3187 		hba_tran->tran_tgt_private = tgt_private;
3188 	}
3189 
3190 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3191 		return (DDI_SUCCESS);
3192 	}
3193 	mutex_enter(&mpt->m_mutex);
3194 
3195 	if (ptgt->m_deviceinfo &
3196 	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3197 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3198 		uchar_t *inq89 = NULL;
3199 		int inq89_len = 0x238;
3200 		int reallen = 0;
3201 		int rval = 0;
3202 		struct sata_id *sid = NULL;
3203 		char model[SATA_ID_MODEL_LEN + 1];
3204 		char fw[SATA_ID_FW_LEN + 1];
3205 		char *vid, *pid;
3206 
3207 		mutex_exit(&mpt->m_mutex);
3208 		/*
3209 		 * According to SCSI/ATA Translation-2 (SAT-2) revision 01a,
3210 		 * chapter 12.4.2, VPD page 89h includes 512 bytes of ATA
3211 		 * IDENTIFY DEVICE data or ATA IDENTIFY PACKET DEVICE data.
3212 		 */
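		/*
		 * Layout sketch as consumed below (per our reading of SAT-2,
		 * for illustration): the VPD header and SAT vendor fields
		 * occupy the first 60 bytes of the page, so the 512 bytes of
		 * IDENTIFY data are read starting at &inq89[60].
		 */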
3213 		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3214 		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3215 		    inq89, inq89_len, &reallen, 1);
3216 
3217 		if (rval != 0) {
3218 			if (inq89 != NULL) {
3219 				kmem_free(inq89, inq89_len);
3220 			}
3221 
3222 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
3223 			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3224 			return (DDI_SUCCESS);
3225 		}
3226 		sid = (void *)(&inq89[60]);
3227 
3228 		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3229 		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3230 
3231 		model[SATA_ID_MODEL_LEN] = 0;
3232 		fw[SATA_ID_FW_LEN] = 0;
3233 
3234 		sata_split_model(model, &vid, &pid);
3235 
3236 		/*
3237 		 * override SCSA "inquiry-*" properties
3238 		 */
3239 		if (vid)
3240 			(void) scsi_device_prop_update_inqstring(sd,
3241 			    INQUIRY_VENDOR_ID, vid, strlen(vid));
3242 		if (pid)
3243 			(void) scsi_device_prop_update_inqstring(sd,
3244 			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
3245 		(void) scsi_device_prop_update_inqstring(sd,
3246 		    INQUIRY_REVISION_ID, fw, strlen(fw));
3247 
3248 		if (inq89 != NULL) {
3249 			kmem_free(inq89, inq89_len);
3250 		}
3251 	} else {
3252 		mutex_exit(&mpt->m_mutex);
3253 	}
3254 
3255 	return (DDI_SUCCESS);
3256 }
3257 /*
3258  * tran_tgt_free(9E) - target device instance deallocation
3259  */
3260 static void
3261 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3262     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3263 {
3264 #ifndef __lock_lint
3265 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3266 #endif
3267 
3268 	mptsas_tgt_private_t	*tgt_private = hba_tran->tran_tgt_private;
3269 
3270 	if (tgt_private != NULL) {
3271 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3272 		hba_tran->tran_tgt_private = NULL;
3273 	}
3274 }
3275 
3276 /*
3277  * scsi_pkt handling
3278  *
3279  * Visible to the external world via the transport structure.
3280  */
3281 
3282 /*
3283  * Notes:
3284  *	- transport the command to the addressed SCSI target/lun device
3285  *	- normal operation is to schedule the command to be transported,
3286  *	  and return TRAN_ACCEPT if this is successful.
3287  *	- if NO_INTR, tran_start must poll device for command completion
3288  */
3289 static int
3290 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
3291 {
3292 #ifndef __lock_lint
3293 	_NOTE(ARGUNUSED(ap))
3294 #endif
3295 	mptsas_t	*mpt = PKT2MPT(pkt);
3296 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
3297 	int		rval;
3298 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3299 
3300 	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
3301 	ASSERT(ptgt);
3302 	if (ptgt == NULL)
3303 		return (TRAN_FATAL_ERROR);
3304 
3305 	/*
3306 	 * prepare the pkt before taking mutex.
3307 	 */
3308 	rval = mptsas_prepare_pkt(cmd);
3309 	if (rval != TRAN_ACCEPT) {
3310 		return (rval);
3311 	}
3312 
3313 	/*
3314 	 * Send the command to target/lun, however your HBA requires it.
3315 	 * If busy, return TRAN_BUSY; if there's some other formatting error
3316 	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
3317 	 * return of TRAN_ACCEPT.
3318 	 *
3319 	 * Remember that access to shared resources, including the mptsas_t
3320 	 * data structure and the HBA hardware registers, must be protected
3321 	 * with mutexes, here and everywhere.
3322 	 *
3323 	 * Also remember that at interrupt time, you'll get an argument
3324 	 * to the interrupt handler which is a pointer to your mptsas_t
3325 	 * structure; you'll have to remember which commands are outstanding
3326 	 * and which scsi_pkt is the currently-running command so the
3327 	 * interrupt handler can refer to the pkt to set completion
3328 	 * status, call the target driver back through pkt_comp, etc.
3329 	 *
3330 	 * If the instance lock is held by another thread, don't spin waiting
3331 	 * for it.  Instead, queue the cmd; the next time the instance lock is
3332 	 * not held, all the queued cmds are accepted.  An extra tx_waitq is
3333 	 * introduced to hold the queue.
3334 	 *
3335 	 * A polled cmd is not queued; it is accepted as usual.
3336 	 *
3337 	 * Under the tx_waitq mutex, record whether a thread is draining
3338 	 * the tx_waitq.  An IO requesting thread that finds the instance
3339 	 * mutex contended appends to the tx_waitq and, while holding the
3340 	 * tx_waitq mutex, if the draining flag is not set, sets it and then
3341 	 * proceeds to spin for the instance mutex. This scheme ensures that
3342 	 * the last cmd in a burst is processed.
3343 	 *
3344 	 * This feature is enabled only when the helper threads are enabled,
3345 	 * which is when we expect the load to be heavy.
3346 	 *
3347 	 * The per-instance mutex m_tx_waitq_mutex protects
3348 	 * m_tx_waitqtail, m_tx_waitq and m_tx_draining.
3349 	 */
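	/*
	 * Minimal sketch of the scheme described above (illustration only,
	 * not the driver's exact code paths):
	 *
	 *	mutex_enter(&mpt->m_tx_waitq_mutex);
	 *	if (mpt->m_tx_draining) {
	 *		append cmd to m_tx_waitq;		(producer)
	 *		mutex_exit(&mpt->m_tx_waitq_mutex);
	 *	} else {
	 *		mpt->m_tx_draining = 1;			(drainer)
	 *		mutex_exit(&mpt->m_tx_waitq_mutex);
	 *		mutex_enter(&mpt->m_mutex);
	 *		mptsas_accept_txwq_and_pkt(mpt, cmd);
	 *		mutex_exit(&mpt->m_mutex);
	 *	}
	 */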
3350 
3351 	if (mpt->m_doneq_thread_n) {
3352 		if (mutex_tryenter(&mpt->m_mutex) != 0) {
3353 			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3354 			mutex_exit(&mpt->m_mutex);
3355 		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3356 			mutex_enter(&mpt->m_mutex);
3357 			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3358 			mutex_exit(&mpt->m_mutex);
3359 		} else {
3360 			mutex_enter(&mpt->m_tx_waitq_mutex);
3361 			/*
3362 			 * ptgt->m_dr_flag is protected by m_mutex or
3363 			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3364 			 * is acquired.
3365 			 */
3366 			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3367 				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3368 					/*
3369 					 * The command should be allowed to
3370 					 * retry by returning TRAN_BUSY to
3371 					 * stall the I/Os which come from
3372 					 * scsi_vhci since the device/path is
3373 					 * in an unstable state now.
3374 					 */
3375 					mutex_exit(&mpt->m_tx_waitq_mutex);
3376 					return (TRAN_BUSY);
3377 				} else {
3378 					/*
3379 					 * The device is offline, just fail the
3380 					 * command by returning
3381 					 * TRAN_FATAL_ERROR.
3382 					 */
3383 					mutex_exit(&mpt->m_tx_waitq_mutex);
3384 					return (TRAN_FATAL_ERROR);
3385 				}
3386 			}
3387 			if (mpt->m_tx_draining) {
3388 				cmd->cmd_flags |= CFLAG_TXQ;
3389 				*mpt->m_tx_waitqtail = cmd;
3390 				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3391 				mutex_exit(&mpt->m_tx_waitq_mutex);
3392 			} else { /* drain the queue */
3393 				mpt->m_tx_draining = 1;
3394 				mutex_exit(&mpt->m_tx_waitq_mutex);
3395 				mutex_enter(&mpt->m_mutex);
3396 				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3397 				mutex_exit(&mpt->m_mutex);
3398 			}
3399 		}
3400 	} else {
3401 		mutex_enter(&mpt->m_mutex);
3402 		/*
3403 		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex;
3404 		 * in this case, m_mutex is acquired.
3405 		 */
3406 		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3407 			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3408 				/*
3409 				 * commands should be allowed to retry by
3410 				 * returning TRAN_BUSY to stall the I/Os
3411 				 * which come from scsi_vhci since the device/
3412 				 * path is in an unstable state now.
3413 				 */
3414 				mutex_exit(&mpt->m_mutex);
3415 				return (TRAN_BUSY);
3416 			} else {
3417 				/*
3418 				 * The device is offline, just fail the
3419 				 * command by returning TRAN_FATAL_ERROR.
3420 				 */
3421 				mutex_exit(&mpt->m_mutex);
3422 				return (TRAN_FATAL_ERROR);
3423 			}
3424 		}
3425 		rval = mptsas_accept_pkt(mpt, cmd);
3426 		mutex_exit(&mpt->m_mutex);
3427 	}
3428 
3429 	return (rval);
3430 }
3431 
3432 /*
3433  * Accept all the queued cmds(if any) before accept the current one.
3434  */
3435 static int
3436 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3437 {
3438 	int rval;
3439 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3440 
3441 	ASSERT(mutex_owned(&mpt->m_mutex));
3442 	/*
3443 	 * The call to mptsas_accept_tx_waitq() must always be performed
3444 	 * because that is where mpt->m_tx_draining is cleared.
3445 	 */
3446 	mutex_enter(&mpt->m_tx_waitq_mutex);
3447 	mptsas_accept_tx_waitq(mpt);
3448 	mutex_exit(&mpt->m_tx_waitq_mutex);
3449 	/*
3450 	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex;
3451 	 * in this case, m_mutex is acquired.
3452 	 */
3453 	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3454 		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3455 			/*
3456 			 * The command should be allowed to retry by returning
3457 			 * TRAN_BUSY to stall the I/O's which come from
3458 			 * scsi_vhci since the device/path is in unstable state
3459 			 * now.
3460 			 */
3461 			return (TRAN_BUSY);
3462 		} else {
3463 			/*
3464 			 * The device is offline, just fail the command by
3465 			 * returning TRAN_FATAL_ERROR.
3466 			 */
3467 			return (TRAN_FATAL_ERROR);
3468 		}
3469 	}
3470 	rval = mptsas_accept_pkt(mpt, cmd);
3471 
3472 	return (rval);
3473 }
3474 
3475 static int
3476 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3477 {
3478 	int		rval = TRAN_ACCEPT;
3479 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3480 
3481 	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3482 
3483 	ASSERT(mutex_owned(&mpt->m_mutex));
3484 
3485 	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3486 		rval = mptsas_prepare_pkt(cmd);
3487 		if (rval != TRAN_ACCEPT) {
3488 			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3489 			return (rval);
3490 		}
3491 	}
3492 
3493 	/*
3494 	 * reset the throttle if we were draining
3495 	 */
3496 	if ((ptgt->m_t_ncmds == 0) &&
3497 	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3498 		NDBG23(("reset throttle"));
3499 		ASSERT(ptgt->m_reset_delay == 0);
3500 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3501 	}
3502 
3503 	/*
3504 	 * If HBA is being reset, the DevHandles are being re-initialized,
3505 	 * which means that they could be invalid even if the target is still
3506 	 * attached.  Check if being reset and if DevHandle is being
3507 	 * re-initialized.  If this is the case, return BUSY so the I/O can be
3508 	 * retried later.
3509 	 */
3510 	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3511 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3512 		if (cmd->cmd_flags & CFLAG_TXQ) {
3513 			mptsas_doneq_add(mpt, cmd);
3514 			mptsas_doneq_empty(mpt);
3515 			return (rval);
3516 		} else {
3517 			return (TRAN_BUSY);
3518 		}
3519 	}
3520 
3521 	/*
3522 	 * If the device handle has already been invalidated, just
3523 	 * fail the command.  In theory, a command with an invalid
3524 	 * devhdl should never arrive from a scsi_vhci client, since
3525 	 * the devhdl is invalidated after the path goes offline and
3526 	 * the target driver is not supposed to select an offlined path.
3527 	 */
3528 	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3529 		NDBG3(("rejecting command, possibly because of an invalid "
3530 		    "devhdl."));
3531 		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3532 		if (cmd->cmd_flags & CFLAG_TXQ) {
3533 			mptsas_doneq_add(mpt, cmd);
3534 			mptsas_doneq_empty(mpt);
3535 			return (rval);
3536 		} else {
3537 			return (TRAN_FATAL_ERROR);
3538 		}
3539 	}
3540 	/*
3541 	 * The first case is the normal case.  mpt gets a command from the
3542 	 * target driver and starts it.
3543 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual
3544 	 * maximum number of commands is m_max_requests - 2.
3545 	 */
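	/*
	 * For illustration with a hypothetical value: if m_max_requests were
	 * 1026, at most 1024 normal commands could be outstanding at once,
	 * since SMID 0 and the task management slot never carry normal I/O.
	 */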
3546 	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3547 	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3548 	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3549 	    (ptgt->m_reset_delay == 0) &&
3550 	    (ptgt->m_t_nwait == 0) &&
3551 	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3552 		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3553 			(void) mptsas_start_cmd(mpt, cmd);
3554 		} else {
3555 			mptsas_waitq_add(mpt, cmd);
3556 		}
3557 	} else {
3558 		/*
3559 		 * Add this pkt to the work queue
3560 		 */
3561 		mptsas_waitq_add(mpt, cmd);
3562 
3563 		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3564 			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3565 
3566 			/*
3567 			 * Only flush the doneq if this is not a TM
3568 			 * cmd.  For TM cmds the flushing of the
3569 			 * doneq will be done in those routines.
3570 			 */
3571 			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3572 				mptsas_doneq_empty(mpt);
3573 			}
3574 		}
3575 	}
3576 	return (rval);
3577 }
3578 
3579 int
3580 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3581 {
3582 	mptsas_slots_t *slots = mpt->m_active;
3583 	uint_t slot, start_rotor;
3584 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3585 
3586 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
3587 
3588 	/*
3589 	 * Account for reserved TM request slot and reserved SMID of 0.
3590 	 */
3591 	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));
3592 
3593 	/*
3594 	 * Find the next available slot, beginning at m_rotor.  If no slot is
3595 	 * available, we'll return FALSE to indicate that.  This mechanism
3596 	 * considers only the normal slots, not the reserved slot 0 nor the
3597 	 * task management slot m_n_normal + 1.  The rotor is left to point to
3598 	 * the normal slot after the one we select, unless we select the last
3599 	 * normal slot in which case it returns to slot 1.
3600 	 */
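	/*
	 * Illustration with hypothetical values: if m_n_normal were 4 and
	 * m_rotor were 3, the loop below would examine slots 3, 4, 1 and 2
	 * in that order, wrapping past m_n_normal back to 1.  If every one
	 * of those slots is occupied, the rotor comes back around to its
	 * starting value, the loop breaks, and the check below returns FALSE.
	 */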
3601 	start_rotor = slots->m_rotor;
3602 	do {
3603 		slot = slots->m_rotor++;
3604 		if (slots->m_rotor > slots->m_n_normal)
3605 			slots->m_rotor = 1;
3606 
3607 		if (slots->m_rotor == start_rotor)
3608 			break;
3609 	} while (slots->m_slot[slot] != NULL);
3610 
3611 	if (slots->m_slot[slot] != NULL)
3612 		return (FALSE);
3613 
3614 	ASSERT(slot != 0 && slot <= slots->m_n_normal);
3615 
3616 	cmd->cmd_slot = slot;
3617 	slots->m_slot[slot] = cmd;
3618 	mpt->m_ncmds++;
3619 
3620 	/*
3621 	 * Only increment the per-target ncmds if this is a command
3622 	 * that actually has a target associated with it (i.e. not an
3623 	 * event acknowledgment).
3624 	 */
3625 	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3626 		/*
3627 		 * Expiration time is set in mptsas_start_cmd
3628 		 */
3629 		ptgt->m_t_ncmds++;
3630 		cmd->cmd_active_expiration = 0;
3631 	} else {
3632 		/*
3633 		 * Initialize the expiration time for passthrough commands.
3634 		 */
3635 		cmd->cmd_active_expiration = gethrtime() +
3636 		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
3637 	}
3638 	return (TRUE);
3639 }
3640 
3641 /*
3642  * prepare the pkt:
3643  * the pkt may have been resubmitted or just reused so
3644  * initialize some fields and do some checks.
3645  */
3646 static int
3647 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3648 {
3649 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
3650 
3651 	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3652 
3653 	/*
3654 	 * Reinitialize some fields that need it; the packet may
3655 	 * have been resubmitted
3656 	 */
3657 	pkt->pkt_reason = CMD_CMPLT;
3658 	pkt->pkt_state = 0;
3659 	pkt->pkt_statistics = 0;
3660 	pkt->pkt_resid = 0;
3661 	cmd->cmd_age = 0;
3662 	cmd->cmd_pkt_flags = pkt->pkt_flags;
3663 
3664 	/*
3665 	 * zero status byte.
3666 	 */
3667 	*(pkt->pkt_scbp) = 0;
3668 
3669 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3670 		pkt->pkt_resid = cmd->cmd_dmacount;
3671 
3672 		/*
3673 		 * consistent packets need to be sync'ed first
3674 		 * (only for data going out)
3675 		 */
3676 		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3677 		    (cmd->cmd_flags & CFLAG_DMASEND)) {
3678 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3679 			    DDI_DMA_SYNC_FORDEV);
3680 		}
3681 	}
3682 
3683 	cmd->cmd_flags =
3684 	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3685 	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3686 
3687 	return (TRAN_ACCEPT);
3688 }
3689 
3690 /*
3691  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3692  *
3693  * One of three possibilities:
3694  *	- allocate scsi_pkt
3695  *	- allocate scsi_pkt and DMA resources
3696  *	- allocate DMA resources to an already-allocated pkt
3697  */
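/*
 * Per the scsi_init_pkt(9E) convention, the arguments select the case: a NULL
 * pkt means a new scsi_pkt is allocated, a non-NULL bp with a non-zero
 * b_bcount means DMA resources are allocated (or the next DMA window is set
 * up), and both together cover the combined case.
 */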
3698 static struct scsi_pkt *
3699 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3700     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3701     int (*callback)(), caddr_t arg)
3702 {
3703 	mptsas_cmd_t		*cmd, *new_cmd;
3704 	mptsas_t		*mpt = ADDR2MPT(ap);
3705 	uint_t			oldcookiec;
3706 	mptsas_target_t		*ptgt = NULL;
3707 	int			rval;
3708 	mptsas_tgt_private_t	*tgt_private;
3709 	int			kf;
3710 
3711 	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3712 
3713 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3714 	    tran_tgt_private;
3715 	ASSERT(tgt_private != NULL);
3716 	if (tgt_private == NULL) {
3717 		return (NULL);
3718 	}
3719 	ptgt = tgt_private->t_private;
3720 	ASSERT(ptgt != NULL);
3721 	if (ptgt == NULL)
3722 		return (NULL);
3723 	ap->a_target = ptgt->m_devhdl;
3724 	ap->a_lun = tgt_private->t_lun;
3725 
3726 	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3727 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3728 	statuslen *= 100; tgtlen *= 4;
3729 #endif
3730 	NDBG3(("mptsas_scsi_init_pkt:\n"
3731 	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3732 	    ap->a_target, (void *)pkt, (void *)bp,
3733 	    cmdlen, statuslen, tgtlen, flags));
3734 
3735 	/*
3736 	 * Allocate the new packet.
3737 	 */
3738 	if (pkt == NULL) {
3739 		ddi_dma_handle_t	save_dma_handle;
3740 
3741 		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3742 		if (cmd == NULL)
3743 			return (NULL);
3744 
3745 		save_dma_handle = cmd->cmd_dmahandle;
3746 		bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3747 		cmd->cmd_dmahandle = save_dma_handle;
3748 
3749 		pkt = (void *)((uchar_t *)cmd +
3750 		    sizeof (struct mptsas_cmd));
3751 		pkt->pkt_ha_private = (opaque_t)cmd;
3752 		pkt->pkt_address = *ap;
3753 		pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3754 		pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3755 		pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3756 		cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3757 		cmd->cmd_cdblen = (uchar_t)cmdlen;
3758 		cmd->cmd_scblen = statuslen;
3759 		cmd->cmd_rqslen = SENSE_LENGTH;
3760 		cmd->cmd_tgt_addr = ptgt;
3761 
3762 		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
3763 		    (tgtlen > PKT_PRIV_LEN) ||
3764 		    (statuslen > EXTCMDS_STATUS_SIZE)) {
3765 			int failure;
3766 
3767 			/*
3768 			 * We are going to allocate external packet space which
3769 			 * might include the sense data buffer for DMA so we
3770 			 * need to increase the reference counter here.  In
3771 			 * case the HBA is in reset, we simply free the
3772 			 * allocated packet and bail out.
3773 			 */
3774 			mutex_enter(&mpt->m_mutex);
3775 			if (mpt->m_in_reset) {
3776 				mutex_exit(&mpt->m_mutex);
3777 
3778 				cmd->cmd_flags = CFLAG_FREE;
3779 				kmem_cache_free(mpt->m_kmem_cache, cmd);
3780 				return (NULL);
3781 			}
3782 			mpt->m_extreq_sense_refcount++;
3783 			ASSERT(mpt->m_extreq_sense_refcount > 0);
3784 			mutex_exit(&mpt->m_mutex);
3785 
3786 			/*
3787 			 * if extern alloc fails, all will be
3788 			 * deallocated, including cmd
3789 			 */
3790 			failure = mptsas_pkt_alloc_extern(mpt, cmd,
3791 			    cmdlen, tgtlen, statuslen, kf);
3792 
3793 			if (failure != 0 || cmd->cmd_extrqslen == 0) {
3794 				/*
3795 				 * If the external packet space allocation
3796 				 * failed, or we didn't allocate the sense
3797 				 * data buffer for DMA we need to decrease the
3798 				 * reference counter.
3799 				 */
3800 				mutex_enter(&mpt->m_mutex);
3801 				ASSERT(mpt->m_extreq_sense_refcount > 0);
3802 				mpt->m_extreq_sense_refcount--;
3803 				if (mpt->m_extreq_sense_refcount == 0)
3804 					cv_broadcast(
3805 					    &mpt->m_extreq_sense_refcount_cv);
3806 				mutex_exit(&mpt->m_mutex);
3807 
3808 				if (failure != 0) {
3809 					/*
3810 					 * if extern allocation fails, it will
3811 					 * deallocate the new pkt as well
3812 					 */
3813 					return (NULL);
3814 				}
3815 			}
3816 		}
3817 		new_cmd = cmd;
3818 
3819 	} else {
3820 		cmd = PKT2CMD(pkt);
3821 		new_cmd = NULL;
3822 	}
3823 
3824 
3825 	/* grab cmd->cmd_cookiec here as oldcookiec */
3826 
3827 	oldcookiec = cmd->cmd_cookiec;
3828 
3829 	/*
3830 	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3831 	 * greater than 0 and we'll need to grab the next dma window
3832 	 */
3833 	/*
3834 	 * SLM-not doing extra command frame right now; may add later
3835 	 */
3836 
3837 	if (cmd->cmd_nwin > 0) {
3838 
3839 		/*
3840 		 * Make sure we haven't gone past the total number
3841 		 * of windows.
3842 		 */
3843 		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3844 			return (NULL);
3845 		}
3846 		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3847 		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3848 		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3849 			return (NULL);
3850 		}
3851 		goto get_dma_cookies;
3852 	}
3853 
3854 
3855 	if (flags & PKT_XARQ) {
3856 		cmd->cmd_flags |= CFLAG_XARQ;
3857 	}
3858 
3859 	/*
3860 	 * DMA resource allocation.  This version assumes your
3861 	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3862 	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3863 	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3864 	 */
3865 	if (bp && (bp->b_bcount != 0) &&
3866 	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3867 
3868 		int	cnt, dma_flags;
3869 		mptti_t	*dmap;		/* ptr to the S/G list */
3870 
3871 		/*
3872 		 * Set up DMA memory and position to the next DMA segment.
3873 		 */
3874 		ASSERT(cmd->cmd_dmahandle != NULL);
3875 
3876 		if (bp->b_flags & B_READ) {
3877 			dma_flags = DDI_DMA_READ;
3878 			cmd->cmd_flags &= ~CFLAG_DMASEND;
3879 		} else {
3880 			dma_flags = DDI_DMA_WRITE;
3881 			cmd->cmd_flags |= CFLAG_DMASEND;
3882 		}
3883 		if (flags & PKT_CONSISTENT) {
3884 			cmd->cmd_flags |= CFLAG_CMDIOPB;
3885 			dma_flags |= DDI_DMA_CONSISTENT;
3886 		}
3887 
3888 		if (flags & PKT_DMA_PARTIAL) {
3889 			dma_flags |= DDI_DMA_PARTIAL;
3890 		}
3891 
3892 		/*
3893 		 * workaround for byte hole issue on psycho and
3894 		 * schizo pre 2.1
3895 		 */
3896 		if ((bp->b_flags & B_READ) && ((bp->b_flags &
3897 		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3898 		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3899 			dma_flags |= DDI_DMA_CONSISTENT;
3900 		}
3901 
3902 		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3903 		    dma_flags, callback, arg,
3904 		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
3905 		if (rval == DDI_DMA_PARTIAL_MAP) {
3906 			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
3907 			    &cmd->cmd_nwin);
3908 			cmd->cmd_winindex = 0;
3909 			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
3910 			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
3911 			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
3912 			    &cmd->cmd_cookiec);
3913 		} else if (rval && (rval != DDI_DMA_MAPPED)) {
3914 			switch (rval) {
3915 			case DDI_DMA_NORESOURCES:
3916 				bioerror(bp, 0);
3917 				break;
3918 			case DDI_DMA_BADATTR:
3919 			case DDI_DMA_NOMAPPING:
3920 				bioerror(bp, EFAULT);
3921 				break;
3922 			case DDI_DMA_TOOBIG:
3923 			default:
3924 				bioerror(bp, EINVAL);
3925 				break;
3926 			}
3927 			cmd->cmd_flags &= ~CFLAG_DMAVALID;
3928 			if (new_cmd) {
3929 				mptsas_scsi_destroy_pkt(ap, pkt);
3930 			}
3931 			return ((struct scsi_pkt *)NULL);
3932 		}
3933 
3934 get_dma_cookies:
3935 		cmd->cmd_flags |= CFLAG_DMAVALID;
3936 		ASSERT(cmd->cmd_cookiec > 0);
3937 
3938 		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3939 			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3940 			    cmd->cmd_cookiec);
3941 			bioerror(bp, EINVAL);
3942 			if (new_cmd) {
3943 				mptsas_scsi_destroy_pkt(ap, pkt);
3944 			}
3945 			return ((struct scsi_pkt *)NULL);
3946 		}
3947 
3948 		/*
3949 		 * Allocate extra SGL buffer if needed.
3950 		 */
3951 		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3952 		    (cmd->cmd_extra_frames == NULL)) {
3953 			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3954 			    DDI_FAILURE) {
3955 				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3956 				    "failed");
3957 				bioerror(bp, ENOMEM);
3958 				if (new_cmd) {
3959 					mptsas_scsi_destroy_pkt(ap, pkt);
3960 				}
3961 				return ((struct scsi_pkt *)NULL);
3962 			}
3963 		}
3964 
3965 		/*
3966 		 * Always use scatter-gather transfer
3967 		 * Use the loop below to store physical addresses of
3968 		 * DMA segments, from the DMA cookies, into your HBA's
3969 		 * scatter-gather list.
3970 		 * We need to ensure we have enough kmem alloc'd
3971 		 * for the sg entries since we are no longer using an
3972 		 * array inside mptsas_cmd_t.
3973 		 *
3974 		 * We check cmd->cmd_cookiec against oldcookiec so
3975 		 * the scatter-gather list is correctly allocated
3976 		 */
3977 
3978 		if (oldcookiec != cmd->cmd_cookiec) {
3979 			if (cmd->cmd_sg != (mptti_t *)NULL) {
3980 				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3981 				    oldcookiec);
3982 				cmd->cmd_sg = NULL;
3983 			}
3984 		}
3985 
3986 		if (cmd->cmd_sg == (mptti_t *)NULL) {
3987 			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3988 			    cmd->cmd_cookiec), kf);
3989 
3990 			if (cmd->cmd_sg == (mptti_t *)NULL) {
3991 				mptsas_log(mpt, CE_WARN,
3992 				    "unable to kmem_alloc enough memory "
3993 				    "for scatter/gather list");
3994 				/*
3995 				 * For an ENOMEM condition we need to behave
3996 				 * the same way as the rest of this routine.
3997 				 */
3998 
3999 				bioerror(bp, ENOMEM);
4000 				if (new_cmd) {
4001 					mptsas_scsi_destroy_pkt(ap, pkt);
4002 				}
4003 				return ((struct scsi_pkt *)NULL);
4004 			}
4005 		}
4006 
4007 		dmap = cmd->cmd_sg;
4008 
4009 		ASSERT(cmd->cmd_cookie.dmac_size != 0);
4010 
4011 		/*
4012 		 * store the first segment into the S/G list
4013 		 */
4014 		dmap->count = cmd->cmd_cookie.dmac_size;
4015 		dmap->addr.address64.Low = (uint32_t)
4016 		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
4017 		dmap->addr.address64.High = (uint32_t)
4018 		    (cmd->cmd_cookie.dmac_laddress >> 32);
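		/*
		 * For example, a hypothetical dmac_laddress of 0x123456789
		 * would be split into Low 0x23456789 and High 0x1.
		 */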
4019 
4020 		/*
4021 		 * dmacount counts the size of the dma for this window
4022 		 * (if partial dma is being used).  totaldmacount
4023 		 * keeps track of the total amount of dma we have
4024 		 * transferred for all the windows (needed to calculate
4025 		 * the resid value below).
4026 		 */
4027 		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
4028 		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
4029 
4030 		/*
4031 		 * We already stored the first DMA scatter gather segment,
4032 		 * start at 1 if we need to store more.
4033 		 */
4034 		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
4035 			/*
4036 			 * Get next DMA cookie
4037 			 */
4038 			ddi_dma_nextcookie(cmd->cmd_dmahandle,
4039 			    &cmd->cmd_cookie);
4040 			dmap++;
4041 
4042 			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
4043 			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
4044 
4045 			/*
4046 			 * store the segment parms into the S/G list
4047 			 */
4048 			dmap->count = cmd->cmd_cookie.dmac_size;
4049 			dmap->addr.address64.Low = (uint32_t)
4050 			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
4051 			dmap->addr.address64.High = (uint32_t)
4052 			    (cmd->cmd_cookie.dmac_laddress >> 32);
4053 		}
4054 
4055 		/*
4056 		 * If this was partially allocated we set the resid to
4057 		 * the amount of data NOT transferred in this window.
4058 		 * If there is only one window, the resid will be 0.
4059 		 */
4060 		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
4061 		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
4062 		    cmd->cmd_dmacount));
4063 	}
4064 	return (pkt);
4065 }
4066 
4067 /*
4068  * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
4069  *
4070  * Notes:
4071  *	- also frees DMA resources if allocated
4072  *	- implicit DMA synchronization
4073  */
4074 static void
4075 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4076 {
4077 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4078 	mptsas_t	*mpt = ADDR2MPT(ap);
4079 
4080 	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
4081 	    ap->a_target, (void *)pkt));
4082 
4083 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4084 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4085 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4086 	}
4087 
4088 	if (cmd->cmd_sg) {
4089 		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
4090 		cmd->cmd_sg = NULL;
4091 	}
4092 
4093 	mptsas_free_extra_sgl_frame(mpt, cmd);
4094 
4095 	if ((cmd->cmd_flags &
4096 	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
4097 	    CFLAG_SCBEXTERN)) == 0) {
4098 		cmd->cmd_flags = CFLAG_FREE;
4099 		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4100 	} else {
4101 		boolean_t extrqslen = cmd->cmd_extrqslen != 0;
4102 
4103 		mptsas_pkt_destroy_extern(mpt, cmd);
4104 
4105 		/*
4106 		 * If the packet had the sense data buffer for DMA allocated we
4107 		 * need to decrease the reference counter.
4108 		 */
4109 		if (extrqslen) {
4110 			mutex_enter(&mpt->m_mutex);
4111 			ASSERT(mpt->m_extreq_sense_refcount > 0);
4112 			mpt->m_extreq_sense_refcount--;
4113 			if (mpt->m_extreq_sense_refcount == 0)
4114 				cv_broadcast(&mpt->m_extreq_sense_refcount_cv);
4115 			mutex_exit(&mpt->m_mutex);
4116 		}
4117 	}
4118 }
4119 
4120 /*
4121  * kmem cache constructor and destructor:
4122  * When constructing, we allocate the dma handle;
4123  * when destructing, we just free the dma handle.
4124  */
4125 static int
4126 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4127 {
4128 	mptsas_cmd_t		*cmd = buf;
4129 	mptsas_t		*mpt  = cdrarg;
4130 	int			(*callback)(caddr_t);
4131 
4132 	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4133 
4134 	NDBG4(("mptsas_kmem_cache_constructor"));
4135 
4136 	/*
4137 	 * allocate a dma handle
4138 	 */
4139 	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4140 	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4141 		cmd->cmd_dmahandle = NULL;
4142 		return (-1);
4143 	}
4144 	return (0);
4145 }
4146 
4147 static void
4148 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4149 {
4150 #ifndef __lock_lint
4151 	_NOTE(ARGUNUSED(cdrarg))
4152 #endif
4153 	mptsas_cmd_t	*cmd = buf;
4154 
4155 	NDBG4(("mptsas_kmem_cache_destructor"));
4156 
4157 	if (cmd->cmd_dmahandle) {
4158 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
4159 		cmd->cmd_dmahandle = NULL;
4160 	}
4161 }
4162 
4163 static int
4164 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
4165 {
4166 	mptsas_cache_frames_t	*p = buf;
4167 	mptsas_t		*mpt = cdrarg;
4168 	ddi_dma_attr_t		frame_dma_attr;
4169 	size_t			mem_size, alloc_len;
4170 	ddi_dma_cookie_t	cookie;
4171 	uint_t			ncookie;
4172 	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
4173 	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4174 
4175 	frame_dma_attr = mpt->m_msg_dma_attr;
4176 	frame_dma_attr.dma_attr_align = 0x10;
4177 	frame_dma_attr.dma_attr_sgllen = 1;
4178 
4179 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
4180 	    &p->m_dma_hdl) != DDI_SUCCESS) {
4181 		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
4182 		    " extra SGL.");
4183 		return (DDI_FAILURE);
4184 	}
4185 
4186 	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
4187 
4188 	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
4189 	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
4190 	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
4191 		ddi_dma_free_handle(&p->m_dma_hdl);
4192 		p->m_dma_hdl = NULL;
4193 		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
4194 		    " extra SGL.");
4195 		return (DDI_FAILURE);
4196 	}
4197 
4198 	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
4199 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
4200 	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
4201 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
4202 		ddi_dma_free_handle(&p->m_dma_hdl);
4203 		p->m_dma_hdl = NULL;
4204 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
4205 		    " extra SGL");
4206 		return (DDI_FAILURE);
4207 	}
4208 
4209 	/*
4210 	 * Store the SGL memory address.  This chip uses this
4211 	 * address to dma to and from the driver.  The second
4212 	 * address is the address mpt uses to fill in the SGL.
4213 	 */
4214 	p->m_phys_addr = cookie.dmac_laddress;
4215 
4216 	return (DDI_SUCCESS);
4217 }
4218 
4219 static void
4220 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4221 {
4222 #ifndef __lock_lint
4223 	_NOTE(ARGUNUSED(cdrarg))
4224 #endif
4225 	mptsas_cache_frames_t	*p = buf;
4226 	if (p->m_dma_hdl != NULL) {
4227 		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
4228 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
4229 		ddi_dma_free_handle(&p->m_dma_hdl);
4230 		p->m_phys_addr = 0;
4231 		p->m_frames_addr = NULL;
4232 		p->m_dma_hdl = NULL;
4233 		p->m_acc_hdl = NULL;
4234 	}
4235 
4236 }
4237 
4238 /*
4239  * Figure out if we need to use a different method for the request
4240  * sense buffer and allocate from the map if necessary.
4241  */
4242 static boolean_t
4243 mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
4244 {
4245 	if (senselength > mpt->m_req_sense_size) {
4246 		unsigned long i;
4247 
4248 		/* Sense length is limited to an 8 bit value in MPI Spec. */
4249 		if (senselength > 255)
4250 			senselength = 255;
4251 		cmd->cmd_extrqschunks = (senselength +
4252 		    (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
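		/*
		 * The expression above is a ceiling division: e.g. with a
		 * hypothetical m_req_sense_size of 64 bytes, a senselength
		 * of 200 needs (200 + 63) / 64 = 4 chunks from the map.
		 */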
4253 		i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
4254 		    (mpt->m_erqsense_map, cmd->cmd_extrqschunks);
4255 
4256 		if (i == 0)
4257 			return (B_FALSE);
4258 
4259 		cmd->cmd_extrqslen = (uint16_t)senselength;
4260 		cmd->cmd_extrqsidx = i - 1;
4261 		cmd->cmd_arq_buf = mpt->m_extreq_sense +
4262 		    (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
4263 	} else {
4264 		cmd->cmd_rqslen = (uchar_t)senselength;
4265 	}
4266 
4267 	return (B_TRUE);
4268 }
4269 
4270 /*
4271  * allocate and deallocate external pkt space (i.e. not part of mptsas_cmd)
4272  * for non-standard length cdb, pkt_private, status areas
4273  * if allocation fails, then deallocate all external space and the pkt
4274  */
4275 /* ARGSUSED */
4276 static int
4277 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4278     int cmdlen, int tgtlen, int statuslen, int kf)
4279 {
4280 	caddr_t			cdbp, scbp, tgt;
4281 
4282 	NDBG3(("mptsas_pkt_alloc_extern: "
4283 	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4284 	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4285 
4286 	tgt = cdbp = scbp = NULL;
4287 	cmd->cmd_scblen		= statuslen;
4288 	cmd->cmd_privlen	= (uchar_t)tgtlen;
4289 
4290 	if (cmdlen > sizeof (cmd->cmd_cdb)) {
4291 		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4292 			goto fail;
4293 		}
4294 		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4295 		cmd->cmd_flags |= CFLAG_CDBEXTERN;
4296 	}
4297 	if (tgtlen > PKT_PRIV_LEN) {
4298 		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4299 			goto fail;
4300 		}
4301 		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4302 		cmd->cmd_pkt->pkt_private = tgt;
4303 	}
4304 	if (statuslen > EXTCMDS_STATUS_SIZE) {
4305 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4306 			goto fail;
4307 		}
4308 		cmd->cmd_flags |= CFLAG_SCBEXTERN;
4309 		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4310 
4311 		/* allocate sense data buf for DMA */
4312 		if (mptsas_cmdarqsize(mpt, cmd, statuslen -
4313 		    MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
4314 		    kf) == B_FALSE)
4315 			goto fail;
4316 	}
4317 	return (0);
4318 fail:
4319 	mptsas_pkt_destroy_extern(mpt, cmd);
4320 	return (1);
4321 }
4322 
4323 /*
4324  * deallocate external pkt space and deallocate the pkt
4325  */
4326 static void
4327 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4328 {
4329 	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4330 
4331 	if (cmd->cmd_flags & CFLAG_FREE) {
4332 		mptsas_log(mpt, CE_PANIC,
4333 		    "mptsas_pkt_destroy_extern: freeing free packet");
4334 		_NOTE(NOT_REACHED)
4335 		/* NOTREACHED */
4336 	}
4337 	if (cmd->cmd_extrqslen != 0) {
4338 		rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
4339 		    cmd->cmd_extrqsidx + 1);
4340 	}
4341 	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4342 		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4343 	}
4344 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4345 		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4346 	}
4347 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4348 		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4349 	}
4350 	cmd->cmd_flags = CFLAG_FREE;
4351 	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4352 }
4353 
4354 /*
4355  * tran_sync_pkt(9E) - explicit DMA synchronization
4356  */
4357 /*ARGSUSED*/
4358 static void
4359 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4360 {
4361 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4362 
4363 	NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4364 	    ap->a_target, (void *)pkt));
4365 
4366 	if (cmd->cmd_dmahandle) {
4367 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4368 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
4369 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4370 	}
4371 }
4372 
4373 /*
4374  * tran_dmafree(9E) - deallocate DMA resources allocated for command
4375  */
4376 /*ARGSUSED*/
4377 static void
4378 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4379 {
4380 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4381 	mptsas_t	*mpt = ADDR2MPT(ap);
4382 
4383 	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4384 	    ap->a_target, (void *)pkt));
4385 
4386 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4387 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4388 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4389 	}
4390 
4391 	mptsas_free_extra_sgl_frame(mpt, cmd);
4392 }
4393 
4394 static void
4395 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4396 {
4397 	if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4398 	    (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4399 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4400 		    DDI_DMA_SYNC_FORCPU);
4401 	}
4402 	(*pkt->pkt_comp)(pkt);
4403 }
4404 
4405 static void
4406 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4407     ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
4408 {
4409 	pMpi2SGESimple64_t	sge;
4410 	mptti_t			*dmap;
4411 	uint32_t		flags;
4412 
4413 	dmap = cmd->cmd_sg;
4414 
4415 	sge = (pMpi2SGESimple64_t)(&frame->SGL);
4416 	while (cookiec--) {
4417 		ddi_put32(acc_hdl,
4418 		    &sge->Address.Low, dmap->addr.address64.Low);
4419 		ddi_put32(acc_hdl,
4420 		    &sge->Address.High, dmap->addr.address64.High);
4421 		ddi_put32(acc_hdl, &sge->FlagsLength,
4422 		    dmap->count);
4423 		flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4424 		flags |= ((uint32_t)
4425 		    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4426 		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4427 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4428 		    MPI2_SGE_FLAGS_SHIFT);
4429 
4430 		/*
4431 		 * If this is the last cookie, we set the flags
4432 		 * to indicate so
4433 		 */
4434 		if (cookiec == 0) {
4435 			flags |= end_flags;
4436 		}
4437 		if (cmd->cmd_flags & CFLAG_DMASEND) {
4438 			flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4439 			    MPI2_SGE_FLAGS_SHIFT);
4440 		} else {
4441 			flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4442 			    MPI2_SGE_FLAGS_SHIFT);
4443 		}
4444 		ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4445 		dmap++;
4446 		sge++;
4447 	}
4448 }
4449 
4450 static void
4451 mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4452     pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4453 {
4454 	pMpi2SGESimple64_t	sge;
4455 	pMpi2SGEChain64_t	sgechain;
4456 	uint64_t		nframe_phys_addr;
4457 	uint_t			cookiec;
4458 	mptti_t			*dmap;
4459 	uint32_t		flags;
4460 
4461 	/*
4462 	 * Save the number of entries in the DMA
4463 	 * Scatter/Gather list
4464 	 */
4465 	cookiec = cmd->cmd_cookiec;
4466 
4467 	/*
4468 	 * Hereby we start to deal with multiple frames.
4469 	 * The process is as follows:
4470 	 * 1. Determine how many frames are needed for SGL element
4471 	 *    storage; Note that all frames are stored in contiguous
4472 	 *    memory space and in 64-bit DMA mode each element is
4473 	 *    3 double-words (12 bytes) long.
4474 	 * 2. Fill up the main frame. We need to do this separately
4475 	 *    since it contains the SCSI IO request header and needs
4476 	 *    dedicated processing. Note that the last 4 double-words
4477 	 *    of the SCSI IO header is for SGL element storage
4478 	 *    (MPI2_SGE_IO_UNION).
4479 	 * 3. Fill the chain element in the main frame, so the DMA
4480 	 *    engine can use the following frames.
4481 	 * 4. Enter a loop to fill the remaining frames. Note that the
4482 	 *    last frame contains no chain element.  The remaining
4483 	 *    frames go into the mpt SGL buffer allocated on the fly,
4484 	 *    not immediately following the main message frame, as in
4485 	 *    Gen1.
4486 	 * Some restrictions:
4487 	 * 1. For 64-bit DMA, the simple element and chain element
4488 	 *    are both of 3 double-words (12 bytes) in size, even
4489 	 *    though all frames are stored in the first 4G of mem
4490 	 *    range and the higher 32-bits of the address are always 0.
4491 	 * 2. On some controllers (like the 1064/1068), a frame can
4492 	 *    hold SGL elements with the last 1 or 2 double-words
4493 	 *    (4 or 8 bytes) un-used. On these controllers, we should
4494 	 *    recognize that there's not enough room for another SGL
4495 	 *    element and move the sge pointer to the next frame.
4496 	 */
4497 	int			i, j, k, l, frames, sgemax;
4498 	int			temp;
4499 	uint8_t			chainflags;
4500 	uint16_t		chainlength;
4501 	mptsas_cache_frames_t	*p;
4502 
4503 	/*
4504 	 * Sgemax is the number of SGEs that will fit in each extra
4505 	 * frame, and frames is the total number of frames we'll
4506 	 * need.  One SGE entry per frame is reserved for the chain
4507 	 * element, thus the -1 below.
4508 	 */
4509 	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4510 	    - 1);
4511 	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4512 
4513 	/*
4514 	 * A little check to see if we need to round up the number
4515 	 * of frames we need
4516 	 */
4517 	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4518 	    sgemax) > 1) {
4519 		frames = (temp + 1);
4520 	} else {
4521 		frames = temp;
4522 	}
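	/*
	 * Worked example with hypothetical sizes: with a 128-byte request
	 * frame and 12-byte Gen1 SGEs, sgemax = (128 / 12) - 1 = 9.  If
	 * MPTSAS_MAX_FRAME_SGES64(mpt) were 5 and cookiec were 50, the main
	 * frame would hold 4 SGEs plus the chain element, leaving 46 SGEs;
	 * temp = 46 / 9 = 5 with a remainder of 1, which still fits since
	 * the last extra frame needs no chain element, so frames = 5.
	 */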
4523 	dmap = cmd->cmd_sg;
4524 	sge = (pMpi2SGESimple64_t)(&frame->SGL);
4525 
4526 	/*
4527 	 * First fill in the main frame
4528 	 */
4529 	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
4530 	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
4531 	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4532 	    MPI2_SGE_FLAGS_SHIFT));
4533 	dmap += j;
4534 	sge += j;
4535 	j++;
4536 
4537 	/*
4538 	 * Fill in the chain element in the main frame.
4539 	 * About calculation on ChainOffset:
4540 	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4541 	 *    in the end reserved for SGL element storage
4542 	 *    (MPI2_SGE_IO_UNION); we should count it in our
4543 	 *    calculation.  See its definition in the header file.
4544 	 * 2. Constant j is the counter of the current SGL element
4545 	 *    that will be processed, and (j - 1) is the number of
4546 	 *    SGL elements that have been processed (stored in the
4547 	 *    main frame).
4548 	 * 3. ChainOffset value should be in units of double-words (4
4549 	 *    bytes) so the last value should be divided by 4.
4550 	 */
4551 	ddi_put8(acc_hdl, &frame->ChainOffset,
4552 	    (sizeof (MPI2_SCSI_IO_REQUEST) -
4553 	    sizeof (MPI2_SGE_IO_UNION) +
4554 	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4555 	sgechain = (pMpi2SGEChain64_t)sge;
4556 	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4557 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4558 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4559 	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4560 
4561 	/*
4562 	 * The size of the next frame is the accurate size of space
4563 	 * (in bytes) used to store the SGL elements. j is the counter
4564 	 * of SGL elements. (j - 1) is the number of SGL elements that
4565 	 * have been processed (stored in frames).
4566 	 */
4567 	if (frames >= 2) {
4568 		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
4569 		chainlength = mpt->m_req_frame_size /
4570 		    sizeof (MPI2_SGE_SIMPLE64) *
4571 		    sizeof (MPI2_SGE_SIMPLE64);
4572 	} else {
4573 		chainlength = ((cookiec - (j - 1)) *
4574 		    sizeof (MPI2_SGE_SIMPLE64));
4575 	}
4576 
4577 	p = cmd->cmd_extra_frames;
4578 
4579 	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4580 	ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
4581 	ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);
4582 
4583 	/*
4584 	 * If there are more than 2 frames left we have to
4585 	 * fill in the next chain offset to the location of
4586 	 * the chain element in the next frame.
4587 	 * sgemax is the number of simple elements in an extra
4588 	 * frame. Note that the value NextChainOffset should be
4589 	 * in double-words (4 bytes).
4590 	 */
4591 	if (frames >= 2) {
4592 		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4593 		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4594 	} else {
4595 		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4596 	}
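	/*
	 * For illustration with hypothetical 12-byte Gen1 SGEs and
	 * sgemax == 9, NextChainOffset here would be (9 * 12) >> 2 = 27
	 * double-words.
	 */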
4597 
4598 	/*
4599 	 * Jump to next frame;
4600 	 * Starting here, chain buffers go into the per command SGL.
4601 	 * This buffer is allocated when chain buffers are needed.
4602 	 */
4603 	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4604 	i = cookiec;
4605 
4606 	/*
4607 	 * Start filling in frames with SGE's.  If we
4608 	 * reach the end of frame and still have SGE's
4609 	 * to fill we need to add a chain element and
4610 	 * use another frame.  j will be our counter
4611 	 * for what cookie we are at and i will be
4612 	 * the total cookiec. k is the current frame
4613 	 */
4614 	for (k = 1; k <= frames; k++) {
4615 		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4616 
4617 			/*
4618 			 * If we have reached the end of frame
4619 			 * and we have more SGE's to fill in
4620 			 * we have to fill the final entry
4621 			 * with a chain element and then
4622 			 * continue to the next frame
4623 			 */
4624 			if ((l == (sgemax + 1)) && (k != frames)) {
4625 				sgechain = (pMpi2SGEChain64_t)sge;
4626 				j--;
4627 				chainflags = (
4628 				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4629 				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4630 				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4631 				ddi_put8(p->m_acc_hdl,
4632 				    &sgechain->Flags, chainflags);
4633 				/*
4634 				 * k is the frame counter and (k + 1)
4635 				 * is the number of the next frame.
4636 				 * Note that frames are in contiguous
4637 				 * memory space.
4638 				 */
4639 				nframe_phys_addr = p->m_phys_addr +
4640 				    (mpt->m_req_frame_size * k);
4641 				ddi_put32(p->m_acc_hdl,
4642 				    &sgechain->Address.Low,
4643 				    nframe_phys_addr);
4644 				ddi_put32(p->m_acc_hdl,
4645 				    &sgechain->Address.High,
4646 				    nframe_phys_addr >> 32);
4647 
4648 				/*
4649 				 * If there are more than 2 frames left
4650 				 * we have to set the next chain offset
4651 				 * to the location of the chain element
4652 				 * in the next frame and fill in the
4653 				 * length of the next chain.
4654 				 */
4655 				if ((frames - k) >= 2) {
4656 					ddi_put8(p->m_acc_hdl,
4657 					    &sgechain->NextChainOffset,
4658 					    (sgemax *
4659 					    sizeof (MPI2_SGE_SIMPLE64))
4660 					    >> 2);
4661 					ddi_put16(p->m_acc_hdl,
4662 					    &sgechain->Length,
4663 					    mpt->m_req_frame_size /
4664 					    sizeof (MPI2_SGE_SIMPLE64) *
4665 					    sizeof (MPI2_SGE_SIMPLE64));
4666 				} else {
4667 					/*
4668 					 * This is the last frame. Set
4669 					 * the NextChainOffset to 0 and
4670 					 * Length is the total size of
4671 					 * all remaining simple elements
4672 					 */
4673 					ddi_put8(p->m_acc_hdl,
4674 					    &sgechain->NextChainOffset,
4675 					    0);
4676 					ddi_put16(p->m_acc_hdl,
4677 					    &sgechain->Length,
4678 					    (cookiec - j) *
4679 					    sizeof (MPI2_SGE_SIMPLE64));
4680 				}
4681 
4682 				/* Jump to the next frame */
4683 				sge = (pMpi2SGESimple64_t)
4684 				    ((char *)p->m_frames_addr +
4685 				    (int)mpt->m_req_frame_size * k);
4686 
4687 				continue;
4688 			}
4689 
4690 			ddi_put32(p->m_acc_hdl,
4691 			    &sge->Address.Low,
4692 			    dmap->addr.address64.Low);
4693 			ddi_put32(p->m_acc_hdl,
4694 			    &sge->Address.High,
4695 			    dmap->addr.address64.High);
4696 			ddi_put32(p->m_acc_hdl,
4697 			    &sge->FlagsLength, dmap->count);
4698 			flags = ddi_get32(p->m_acc_hdl,
4699 			    &sge->FlagsLength);
4700 			flags |= ((uint32_t)(
4701 			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4702 			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4703 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4704 			    MPI2_SGE_FLAGS_SHIFT);
4705 
4706 			/*
4707 			 * If we are at the end of the frame and
4708 			 * there is another frame to fill in
4709 			 * we set the last simple element as last
4710 			 * element
4711 			 */
4712 			if ((l == sgemax) && (k != frames)) {
4713 				flags |= ((uint32_t)
4714 				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4715 				    MPI2_SGE_FLAGS_SHIFT);
4716 			}
4717 
4718 			/*
4719 			 * If this is the final cookie we
4720 			 * indicate it by setting the flags
4721 			 */
4722 			if (j == i) {
4723 				flags |= ((uint32_t)
4724 				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
4725 				    MPI2_SGE_FLAGS_END_OF_BUFFER |
4726 				    MPI2_SGE_FLAGS_END_OF_LIST) <<
4727 				    MPI2_SGE_FLAGS_SHIFT);
4728 			}
4729 			if (cmd->cmd_flags & CFLAG_DMASEND) {
4730 				flags |=
4731 				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4732 				    MPI2_SGE_FLAGS_SHIFT);
4733 			} else {
4734 				flags |=
4735 				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4736 				    MPI2_SGE_FLAGS_SHIFT);
4737 			}
4738 			ddi_put32(p->m_acc_hdl,
4739 			    &sge->FlagsLength, flags);
4740 			dmap++;
4741 			sge++;
4742 		}
4743 	}
4744 
4745 	/*
4746 	 * Sync DMA with the chain buffers that were just created
4747 	 */
4748 	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4749 }
4750 
4751 static void
4752 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4753     ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4754 {
4755 	pMpi2IeeeSgeSimple64_t	ieeesge;
4756 	mptti_t			*dmap;
4757 	uint8_t			flags;
4758 
4759 	dmap = cmd->cmd_sg;
4760 
4761 	NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4762 	    cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4763 
4764 	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4765 	while (cookiec--) {
4766 		ddi_put32(acc_hdl,
4767 		    &ieeesge->Address.Low, dmap->addr.address64.Low);
4768 		ddi_put32(acc_hdl,
4769 		    &ieeesge->Address.High, dmap->addr.address64.High);
4770 		ddi_put32(acc_hdl, &ieeesge->Length,
4771 		    dmap->count);
4772 		NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4773 		flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4774 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4775 
4776 		/*
4777 		 * If this is the last cookie, we set the flags
4778 		 * to indicate so
4779 		 */
4780 		if (cookiec == 0) {
4781 			flags |= end_flag;
4782 		}
4783 
4784 		ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4785 		dmap++;
4786 		ieeesge++;
4787 	}
4788 }
4789 
4790 static void
4791 mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4792     pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4793 {
4794 	pMpi2IeeeSgeSimple64_t	ieeesge;
4795 	pMpi25IeeeSgeChain64_t	ieeesgechain;
4796 	uint64_t		nframe_phys_addr;
4797 	uint_t			cookiec;
4798 	mptti_t			*dmap;
4799 	uint8_t			flags;
4800 
4801 	/*
4802 	 * Save the number of entries in the DMA
4803 	 * Scatter/Gather list
4804 	 */
4805 	cookiec = cmd->cmd_cookiec;
4806 
4807 	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));
4808 
4809 	/*
4810 	 * Hereby we start to deal with multiple frames.
4811 	 * The process is as follows:
4812 	 * 1. Determine how many frames are needed for SGL element
4813 	 *    storage; Note that all frames are stored in contiguous
4814 	 *    memory space and in 64-bit DMA mode each element is
4815 	 *    4 double-words (16 bytes) long.
4816 	 * 2. Fill up the main frame. We need to do this separately
4817 	 *    since it contains the SCSI IO request header and needs
4818 	 *    dedicated processing. Note that the last 4 double-words
4819 	 *    of the SCSI IO header is for SGL element storage
4820 	 *    (MPI2_SGE_IO_UNION).
4821 	 * 3. Fill the chain element in the main frame, so the DMA
4822 	 *    engine can use the following frames.
4823 	 * 4. Enter a loop to fill the remaining frames. Note that the
4824 	 *    last frame contains no chain element.  The remaining
4825 	 *    frames go into the mpt SGL buffer allocated on the fly,
4826 	 *    not immediately following the main message frame, as in
4827 	 *    Gen1.
4828 	 * Restrictions:
4829 	 *    For 64-bit DMA, the simple element and chain element
4830 	 *    are both of 4 double-words (16 bytes) in size, even
4831 	 *    though all frames are stored in the first 4G of mem
4832 	 *    range and the higher 32-bits of the address are always 0.
4833 	 */
4834 	int			i, j, k, l, frames, sgemax;
4835 	int			temp;
4836 	uint8_t			chainflags;
4837 	uint32_t		chainlength;
4838 	mptsas_cache_frames_t	*p;
4839 
4840 	/*
4841 	 * Sgemax is the number of SGEs that will fit in each extra
4842 	 * frame, and frames is the total number of frames we'll
4843 	 * need.  One SGE entry per frame is reserved for the chain
4844 	 * element, thus the -1 below.
4845 	 */
4846 	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
4847 	    - 1);
4848 	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4849 
4850 	/*
4851 	 * A little check to see if we need to round up the number
4852 	 * of frames we need
4853 	 */
4854 	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4855 	    sgemax) > 1) {
4856 		frames = (temp + 1);
4857 	} else {
4858 		frames = temp;
4859 	}
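	/*
	 * The arithmetic mirrors mptsas_sge_chain() above, except that an
	 * IEEE simple element is 16 bytes; e.g. with a hypothetical 128-byte
	 * request frame, sgemax would be (128 / 16) - 1 = 7.
	 */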
4860 	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
4861 	dmap = cmd->cmd_sg;
4862 	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4863 
4864 	/*
4865 	 * First fill in the main frame
4866 	 */
4867 	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
4868 	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
4869 	dmap += j;
4870 	ieeesge += j;
4871 	j++;
4872 
4873 	/*
4874 	 * Fill in the chain element in the main frame.
4875 	 * About calculation on ChainOffset:
4876 	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4877 	 *    in the end reserved for SGL element storage
4878 	 *    (MPI2_SGE_IO_UNION); we should count it in our
4879 	 *    calculation.  See its definition in the header file.
4880 	 * 2. Constant j is the counter of the current SGL element
4881 	 *    that will be processed, and (j - 1) is the number of
4882 	 *    SGL elements that have been processed (stored in the
4883 	 *    main frame).
4884 	 * 3. ChainOffset value should be in units of quad-words (16
4885 	 *    bytes) so the last value should be divided by 16.
4886 	 */
4887 	ddi_put8(acc_hdl, &frame->ChainOffset,
4888 	    (sizeof (MPI2_SCSI_IO_REQUEST) -
4889 	    sizeof (MPI2_SGE_IO_UNION) +
4890 	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
4891 	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
4892 	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
4893 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4894 	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);
4895 
4896 	/*
4897 	 * The size of the next frame is the accurate size of space
4898 	 * (in bytes) used to store the SGL elements. j is the counter
4899 	 * of SGL elements. (j - 1) is the number of SGL elements that
4900 	 * have been processed (stored in frames).
4901 	 */
4902 	if (frames >= 2) {
4903 		ASSERT(mpt->m_req_frame_size >=
4904 		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
4905 		chainlength = mpt->m_req_frame_size /
4906 		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
4907 		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
4908 	} else {
4909 		chainlength = ((cookiec - (j - 1)) *
4910 		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
4911 	}
4912 
4913 	p = cmd->cmd_extra_frames;
4914 
4915 	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
4916 	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
4917 	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);
4918 
4919 	/*
4920 	 * If there are more than 2 frames left we have to
4921 	 * fill in the next chain offset to the location of
4922 	 * the chain element in the next frame.
4923 	 * sgemax is the number of simple elements in an extra
4924 	 * frame. Note that for the IEEE SGL the value NextChainOffset
4925 	 * is in units of 16 bytes.
4926 	 */
4927 	if (frames >= 2) {
4928 		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
4929 		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
4930 	} else {
4931 		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
4932 	}
4933 
4934 	/*
4935 	 * Jump to next frame;
4936 	 * Starting here, chain buffers go into the per command SGL.
4937 	 * This buffer is allocated when chain buffers are needed.
4938 	 */
4939 	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
4940 	i = cookiec;
4941 
4942 	/*
4943 	 * Start filling in frames with SGE's.  If we
4944 	 * reach the end of frame and still have SGE's
4945 	 * to fill we need to add a chain element and
4946 	 * use another frame.  j will be our counter
4947 	 * for what cookie we are at and i will be
4948 	 * the total cookiec. k is the current frame
4949 	 */
4950 	for (k = 1; k <= frames; k++) {
4951 		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4952 
4953 			/*
4954 			 * If we have reached the end of frame
4955 			 * and we have more SGE's to fill in
4956 			 * we have to fill the final entry
4957 			 * with a chain element and then
4958 			 * continue to the next frame
4959 			 */
4960 			if ((l == (sgemax + 1)) && (k != frames)) {
4961 				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
4962 				j--;
4963 				chainflags =
4964 				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
4965 				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
4966 				ddi_put8(p->m_acc_hdl,
4967 				    &ieeesgechain->Flags, chainflags);
4968 				/*
4969 				 * k is the frame counter and (k + 1)
4970 				 * is the number of the next frame.
4971 				 * Note that frames are in contiguous
4972 				 * memory space.
4973 				 */
4974 				nframe_phys_addr = p->m_phys_addr +
4975 				    (mpt->m_req_frame_size * k);
4976 				ddi_put32(p->m_acc_hdl,
4977 				    &ieeesgechain->Address.Low,
4978 				    nframe_phys_addr);
4979 				ddi_put32(p->m_acc_hdl,
4980 				    &ieeesgechain->Address.High,
4981 				    nframe_phys_addr >> 32);
4982 
4983 				/*
4984 				 * If there are more than 2 frames left
4985 				 * we have to set the next chain offset
4986 				 * to the location of the chain element
4987 				 * in the next frame and fill in the
4988 				 * length of the next chain.
4989 				 */
4990 				if ((frames - k) >= 2) {
4991 					ddi_put8(p->m_acc_hdl,
4992 					    &ieeesgechain->NextChainOffset,
4993 					    (sgemax *
4994 					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
4995 					    >> 4);
4996 					ASSERT(mpt->m_req_frame_size >=
4997 					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
4998 					ddi_put32(p->m_acc_hdl,
4999 					    &ieeesgechain->Length,
5000 					    mpt->m_req_frame_size /
5001 					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
5002 					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
5003 				} else {
5004 					/*
5005 					 * This is the last frame. Set
5006 					 * the NextChainOffset to 0 and
5007 					 * Length is the total size of
5008 					 * all remaining simple elements
5009 					 */
5010 					ddi_put8(p->m_acc_hdl,
5011 					    &ieeesgechain->NextChainOffset,
5012 					    0);
5013 					ddi_put32(p->m_acc_hdl,
5014 					    &ieeesgechain->Length,
5015 					    (cookiec - j) *
5016 					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
5017 				}
5018 
5019 				/* Jump to the next frame */
5020 				ieeesge = (pMpi2IeeeSgeSimple64_t)
5021 				    ((char *)p->m_frames_addr +
5022 				    (int)mpt->m_req_frame_size * k);
5023 
5024 				continue;
5025 			}
5026 
5027 			ddi_put32(p->m_acc_hdl,
5028 			    &ieeesge->Address.Low,
5029 			    dmap->addr.address64.Low);
5030 			ddi_put32(p->m_acc_hdl,
5031 			    &ieeesge->Address.High,
5032 			    dmap->addr.address64.High);
5033 			ddi_put32(p->m_acc_hdl,
5034 			    &ieeesge->Length, dmap->count);
5035 			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
5036 			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
5037 
5038 			/*
5039 			 * If we are at the end of the frame and
5040 			 * there is another frame to fill in
5041 			 * do we need to do anything?
5042 			 * if ((l == sgemax) && (k != frames)) {
5043 			 * }
5044 			 */
5045 
5046 			/*
5047 			 * If this is the final cookie set end of list.
5048 			 */
5049 			if (j == i) {
5050 				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
5051 			}
5052 
5053 			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
5054 			dmap++;
5055 			ieeesge++;
5056 		}
5057 	}
5058 
5059 	/*
5060 	 * Sync DMA with the chain buffers that were just created
5061 	 */
5062 	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
5063 }
5064 
5065 static void
5066 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
5067     pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
5068 {
5069 	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
5070 
5071 	NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
5072 
5073 	/*
5074 	 * Set read/write bit in control.
5075 	 */
5076 	if (cmd->cmd_flags & CFLAG_DMASEND) {
5077 		*control |= MPI2_SCSIIO_CONTROL_WRITE;
5078 	} else {
5079 		*control |= MPI2_SCSIIO_CONTROL_READ;
5080 	}
5081 
5082 	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
5083 
5084 	/*
5085 	 * We have 4 cases here: the SG elements either all fit into
5086 	 * the main frame, or they do not, and for each of those the
5087 	 * SG element format differs between the MPI2 and MPI2.5
5088 	 * interfaces.
5089 	 * If we have more cookies than we can attach to a frame
5090 	 * we will need to use a chain element to point to a
5091 	 * location in memory where the rest of the S/G
5092 	 * elements reside.
5093 	 */
5094 	if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
5095 		if (mpt->m_MPI25) {
5096 			mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
5097 			    cmd->cmd_cookiec,
5098 			    MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
5099 		} else {
5100 			mptsas_sge_mainframe(cmd, frame, acc_hdl,
5101 			    cmd->cmd_cookiec,
5102 			    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
5103 			    | MPI2_SGE_FLAGS_END_OF_BUFFER
5104 			    | MPI2_SGE_FLAGS_END_OF_LIST) <<
5105 			    MPI2_SGE_FLAGS_SHIFT));
5106 		}
5107 	} else {
5108 		if (mpt->m_MPI25) {
5109 			mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
5110 		} else {
5111 			mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
5112 		}
5113 	}
5114 }
5115 
5116 /*
5117  * Interrupt handling
5118  * Utility routine.  Poll for status of a command sent to HBA
5119  * without interrupts (a FLAG_NOINTR command).
5120  */
5121 int
5122 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
5123 {
5124 	int	rval = TRUE;
5125 
5126 	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
5127 
5128 	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
5129 		mptsas_restart_hba(mpt);
5130 	}
5131 
5132 	/*
5133 	 * Wait, using drv_usecwait(), long enough for the command to
5134 	 * reasonably return from the target if the target isn't
5135 	 * "dead".  A polled command may well be sent from scsi_poll, and
5136 	 * there are retries built in to scsi_poll if the transport
5137 	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
5138 	 * and retries the transport up to scsi_poll_busycnt times
5139 	 * (currently 60) if
5140 	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
5141 	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
5142 	 *
5143 	 * limit the waiting to avoid a hang in the event that the
5144 	 * cmd never gets started but we are still receiving interrupts
5145 	 */
5146 	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
5147 		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
5148 			NDBG5(("mptsas_poll: command incomplete"));
5149 			rval = FALSE;
5150 			break;
5151 		}
5152 	}
5153 
5154 	if (rval == FALSE) {
5155 
5156 		/*
5157 		 * This isn't supposed to happen; the HBA must be wedged.
5158 		 * Mark this cmd as a timeout.
5159 		 */
5160 		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
5161 		    (STAT_TIMEOUT|STAT_ABORTED));
5162 
5163 		if (poll_cmd->cmd_queued == FALSE) {
5164 
5165 			NDBG5(("mptsas_poll: not on waitq"));
5166 
5167 			poll_cmd->cmd_pkt->pkt_state |=
5168 			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
5169 		} else {
5170 
5171 			/* find and remove it from the waitq */
5172 			NDBG5(("mptsas_poll: delete from waitq"));
5173 			mptsas_waitq_delete(mpt, poll_cmd);
5174 		}
5175 
5176 	}
5177 	mptsas_fma_check(mpt, poll_cmd);
5178 	NDBG5(("mptsas_poll: done"));
5179 	return (rval);
5180 }
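
/*
 * Illustrative (hypothetical) use of mptsas_poll() for a FLAG_NOINTR
 * command; the timeout constant below is an example, not a driver-defined
 * value:
 *
 *	if (mptsas_poll(mpt, cmd, 30 * 1000) == FALSE)
 *		pkt_reason has been set to CMD_TIMEOUT by mptsas_poll
 *
 * polltime is consumed by mptsas_wait_intr() as a count of one-millisecond
 * waits, so it is effectively a timeout in milliseconds.
 */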
5181 
5182 /*
5183  * Used for polling cmds and TM function
5184  */
5185 static int
5186 mptsas_wait_intr(mptsas_t *mpt, int polltime)
5187 {
5188 	int				cnt;
5189 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
5190 	uint32_t			int_mask;
5191 
5192 	NDBG5(("mptsas_wait_intr"));
5193 
5194 	mpt->m_polled_intr = 1;
5195 
5196 	/*
5197 	 * Get the current interrupt mask and disable interrupts.  When
5198 	 * re-enabling ints, set mask to saved value.
5199 	 */
5200 	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
5201 	MPTSAS_DISABLE_INTR(mpt);
5202 
5203 	/*
5204 	 * Keep polling for up to polltime milliseconds (1ms per loop).
5205 	 */
5206 	for (cnt = 0; cnt < polltime; cnt++) {
5207 		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5208 		    DDI_DMA_SYNC_FORCPU);
5209 
5210 		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5211 		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5212 
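		/*
		 * Reply post descriptors are initialized to (and reset to,
		 * after being consumed) all ones, so an entry whose words
		 * are both 0xFFFFFFFF means no new reply has been posted at
		 * this index yet.
		 */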
5213 		if (ddi_get32(mpt->m_acc_post_queue_hdl,
5214 		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5215 		    ddi_get32(mpt->m_acc_post_queue_hdl,
5216 		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5217 			drv_usecwait(1000);
5218 			continue;
5219 		}
5220 
5221 		/*
5222 		 * The reply is valid, process it according to its
5223 		 * type.
5224 		 */
5225 		mptsas_process_intr(mpt, reply_desc_union);
5226 
5227 		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5228 			mpt->m_post_index = 0;
5229 		}
5230 
5231 		/*
5232 		 * Update the global reply index
5233 		 */
5234 		ddi_put32(mpt->m_datap,
5235 		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5236 		mpt->m_polled_intr = 0;
5237 
5238 		/*
5239 		 * Re-enable interrupts and quit.
5240 		 */
5241 		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5242 		    int_mask);
5243 		return (TRUE);
5244 
5245 	}
5246 
5247 	/*
5248 	 * Clear polling flag, re-enable interrupts and quit.
5249 	 */
5250 	mpt->m_polled_intr = 0;
5251 	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5252 	return (FALSE);
5253 }
5254 
5255 static void
5256 mptsas_handle_scsi_io_success(mptsas_t *mpt,
5257     pMpi2ReplyDescriptorsUnion_t reply_desc)
5258 {
5259 	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
5260 	uint16_t				SMID;
5261 	mptsas_slots_t				*slots = mpt->m_active;
5262 	mptsas_cmd_t				*cmd = NULL;
5263 	struct scsi_pkt				*pkt;
5264 
5265 	ASSERT(mutex_owned(&mpt->m_mutex));
5266 
5267 	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
5268 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
5269 
5270 	/*
5271 	 * This is a success reply so just complete the IO.  First, do a sanity
5272 	 * check on the SMID.  The final slot is used for TM requests, which
5273 	 * would not come into this reply handler.
5274 	 */
5275 	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
5276 		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
5277 		    SMID);
5278 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5279 		return;
5280 	}
5281 
5282 	cmd = slots->m_slot[SMID];
5283 
5284 	/*
5285 	 * print warning and return if the slot is empty
5286 	 */
5287 	if (cmd == NULL) {
5288 		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
5289 		    "in slot %d", SMID);
5290 		return;
5291 	}
5292 
5293 	pkt = CMD2PKT(cmd);
5294 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
5295 	    STATE_GOT_STATUS);
5296 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
5297 		pkt->pkt_state |= STATE_XFERRED_DATA;
5298 	}
5299 	pkt->pkt_resid = 0;
5300 
5301 	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
5302 		cmd->cmd_flags |= CFLAG_FINISHED;
5303 		cv_broadcast(&mpt->m_passthru_cv);
5304 		return;
5305 	} else {
5306 		mptsas_remove_cmd(mpt, cmd);
5307 	}
5308 
5309 	if (cmd->cmd_flags & CFLAG_RETRY) {
5310 		/*
5311 		 * The target returned QFULL or busy, do not add this
5312 		 * pkt to the doneq since the hba will retry
5313 		 * this cmd.
5314 		 *
5315 		 * The pkt has already been resubmitted in
5316 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5317 		 * Remove this cmd_flag here.
5318 		 */
5319 		cmd->cmd_flags &= ~CFLAG_RETRY;
5320 	} else {
5321 		mptsas_doneq_add(mpt, cmd);
5322 	}
5323 }
5324 
5325 static void
5326 mptsas_handle_address_reply(mptsas_t *mpt,
5327     pMpi2ReplyDescriptorsUnion_t reply_desc)
5328 {
5329 	pMpi2AddressReplyDescriptor_t	address_reply;
5330 	pMPI2DefaultReply_t		reply;
5331 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
5332 	uint32_t			reply_addr, reply_frame_dma_baseaddr;
5333 	uint16_t			SMID, iocstatus;
5334 	mptsas_slots_t			*slots = mpt->m_active;
5335 	mptsas_cmd_t			*cmd = NULL;
5336 	uint8_t				function, buffer_type;
5337 	m_replyh_arg_t			*args;
5338 	int				reply_frame_no;
5339 
5340 	ASSERT(mutex_owned(&mpt->m_mutex));
5341 
5342 	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5343 	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5344 	    &address_reply->ReplyFrameAddress);
5345 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5346 
5347 	/*
5348 	 * If reply frame is not in the proper range we should ignore this
5349 	 * message and exit the interrupt handler.
5350 	 */
5351 	reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
5352 	if ((reply_addr < reply_frame_dma_baseaddr) ||
5353 	    (reply_addr >= (reply_frame_dma_baseaddr +
5354 	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5355 	    ((reply_addr - reply_frame_dma_baseaddr) %
5356 	    mpt->m_reply_frame_size != 0)) {
5357 		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5358 		    "address 0x%x\n", reply_addr);
5359 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5360 		return;
5361 	}
5362 
5363 	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5364 	    DDI_DMA_SYNC_FORCPU);
5365 	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5366 	    reply_frame_dma_baseaddr));
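	/*
	 * ReplyFrameAddress is the low 32 bits of the frame's DMA address;
	 * subtracting the base yields the byte offset into the host mapping
	 * of the reply frame pool (frame n lives at offset
	 * n * m_reply_frame_size).
	 */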
5367 	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5368 
5369 	NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5370 	    function, reply_addr));
5371 
5372 	/*
5373 	 * don't get slot information and command for events since these values
5374 	 * don't exist
5375 	 */
5376 	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5377 	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5378 		/*
5379 		 * This could be a TM reply, which uses the last allocated SMID,
5380 		 * so allow for that.
5381 		 */
5382 		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5383 			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5384 			    "%d\n", SMID);
5385 			ddi_fm_service_impact(mpt->m_dip,
5386 			    DDI_SERVICE_UNAFFECTED);
5387 			return;
5388 		}
5389 
5390 		cmd = slots->m_slot[SMID];
5391 
5392 		/*
5393 		 * print warning and return if the slot is empty
5394 		 */
5395 		if (cmd == NULL) {
5396 			mptsas_log(mpt, CE_WARN, "?NULL command for address "
5397 			    "reply in slot %d", SMID);
5398 			return;
5399 		}
5400 		if ((cmd->cmd_flags &
5401 		    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5402 			cmd->cmd_rfm = reply_addr;
5403 			cmd->cmd_flags |= CFLAG_FINISHED;
5404 			cv_broadcast(&mpt->m_passthru_cv);
5405 			cv_broadcast(&mpt->m_config_cv);
5406 			cv_broadcast(&mpt->m_fw_diag_cv);
5407 			return;
5408 		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5409 			mptsas_remove_cmd(mpt, cmd);
5410 		}
5411 		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5412 	}
5413 	/*
5414 	 * Depending on the function, we need to handle
5415 	 * the reply frame (and cmd) differently.
5416 	 */
5417 	switch (function) {
5418 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
5419 		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5420 		break;
5421 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
5422 		cmd->cmd_rfm = reply_addr;
5423 		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5424 		    cmd);
5425 		break;
5426 	case MPI2_FUNCTION_FW_DOWNLOAD:
5427 		cmd->cmd_flags |= CFLAG_FINISHED;
5428 		cv_signal(&mpt->m_fw_cv);
5429 		break;
5430 	case MPI2_FUNCTION_EVENT_NOTIFICATION:
5431 		reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
5432 		    mpt->m_reply_frame_size;
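		/*
		 * Each reply frame has a pre-allocated argument slot in
		 * m_replyh_args; index it by the frame's position within the
		 * reply frame pool so the taskq handler can locate both the
		 * mpt instance and the frame to acknowledge.
		 */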
5433 		args = &mpt->m_replyh_args[reply_frame_no];
5434 		args->mpt = (void *)mpt;
5435 		args->rfm = reply_addr;
5436 
5437 		/*
5438 		 * Record the event if its type is enabled in
5439 		 * this mpt instance by ioctl.
5440 		 */
5441 		mptsas_record_event(args);
5442 
5443 		/*
5444 		 * Handle time critical events
5445 		 * NOT_RESPONDING/ADDED only now
5446 		 */
5447 		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5448 			/*
5449 			 * Do not block the main path here; let the taskq
5450 			 * resolve the ack action, and the ack will be sent
5451 			 * from the taskq thread.
5452 			 */
5453 			NDBG20(("send mptsas_handle_event_sync success"));
5454 		}
5455 
5456 		if (mpt->m_in_reset) {
5457 			NDBG20(("dropping event received during reset"));
5458 			return;
5459 		}
5460 
5461 		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5462 		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5463 			mptsas_log(mpt, CE_WARN, "No memory available "
5464 			    "for dispatch taskq");
5465 			/*
5466 			 * Return the reply frame to the free queue.
5467 			 */
5468 			ddi_put32(mpt->m_acc_free_queue_hdl,
5469 			    &((uint32_t *)(void *)
5470 			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5471 			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5472 			    DDI_DMA_SYNC_FORDEV);
5473 			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5474 				mpt->m_free_index = 0;
5475 			}
5476 
5477 			ddi_put32(mpt->m_datap,
5478 			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5479 		}
5480 		return;
5481 	case MPI2_FUNCTION_DIAG_BUFFER_POST:
5482 		/*
5483 		 * If SMID is 0, this implies that the reply is due to a
5484 		 * release function with a status that the buffer has been
5485 		 * released.  Set the buffer flags accordingly.
5486 		 */
5487 		if (SMID == 0) {
5488 			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5489 			    &reply->IOCStatus);
5490 			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5491 			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5492 			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5493 				pBuffer =
5494 				    &mpt->m_fw_diag_buffer_list[buffer_type];
5495 				pBuffer->valid_data = TRUE;
5496 				pBuffer->owned_by_firmware = FALSE;
5497 				pBuffer->immediate = FALSE;
5498 			}
5499 		} else {
5500 			/*
5501 			 * Normal handling of diag post reply with SMID.
5502 			 */
5503 			cmd = slots->m_slot[SMID];
5504 
5505 			/*
5506 			 * print warning and return if the slot is empty
5507 			 */
5508 			if (cmd == NULL) {
5509 				mptsas_log(mpt, CE_WARN, "?NULL command for "
5510 				    "address reply in slot %d", SMID);
5511 				return;
5512 			}
5513 			cmd->cmd_rfm = reply_addr;
5514 			cmd->cmd_flags |= CFLAG_FINISHED;
5515 			cv_broadcast(&mpt->m_fw_diag_cv);
5516 		}
5517 		return;
5518 	default:
5519 		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5520 		break;
5521 	}
5522 
5523 	/*
5524 	 * Return the reply frame to the free queue.
5525 	 */
5526 	ddi_put32(mpt->m_acc_free_queue_hdl,
5527 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5528 	    reply_addr);
5529 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5530 	    DDI_DMA_SYNC_FORDEV);
5531 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5532 		mpt->m_free_index = 0;
5533 	}
5534 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5535 	    mpt->m_free_index);
5536 
5537 	if (cmd->cmd_flags & CFLAG_FW_CMD)
5538 		return;
5539 
5540 	if (cmd->cmd_flags & CFLAG_RETRY) {
5541 		/*
5542 		 * The target returned QFULL or busy, do not add this
5543 		 * pkt to the doneq since the hba will retry
5544 		 * this cmd.
5545 		 *
5546 		 * The pkt has already been resubmitted in
5547 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5548 		 * Remove this cmd_flag here.
5549 		 */
5550 		cmd->cmd_flags &= ~CFLAG_RETRY;
5551 	} else {
5552 		mptsas_doneq_add(mpt, cmd);
5553 	}
5554 }
5555 
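/*
 * mptsas_last_sense is a debug-only snapshot of the most recent sense data
 * returned with a CHECK CONDITION; it exists purely so the data can be
 * inspected post-mortem (for example from a debugger) and is not consumed
 * anywhere else in the driver.
 */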
5556 #ifdef MPTSAS_DEBUG
5557 static uint8_t mptsas_last_sense[256];
5558 #endif
5559 
5560 static void
5561 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5562     mptsas_cmd_t *cmd)
5563 {
5564 	uint8_t			scsi_status, scsi_state;
5565 	uint16_t		ioc_status, cmd_rqs_len;
5566 	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
5567 	struct scsi_pkt		*pkt;
5568 	struct scsi_arq_status	*arqstat;
5569 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
5570 	uint8_t			*sensedata = NULL;
5571 	uint64_t		sas_wwn;
5572 	uint8_t			phy;
5573 	char			wwn_str[MPTSAS_WWN_STRLEN];
5574 
5575 	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5576 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5577 	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5578 	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5579 	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5580 	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5581 	    &reply->ResponseInfo);
5582 
5583 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5584 		sas_wwn = ptgt->m_addr.mta_wwn;
5585 		phy = ptgt->m_phynum;
5586 		if (sas_wwn == 0) {
5587 			(void) sprintf(wwn_str, "p%x", phy);
5588 		} else {
5589 			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5590 		}
5591 		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5592 		    &reply->IOCLogInfo);
5593 		mptsas_log(mpt, CE_NOTE,
5594 		    "?Log info 0x%x received for target %d %s.\n"
5595 		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5596 		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5597 		    scsi_state);
5598 	}
5599 
5600 	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5601 	    scsi_status, ioc_status, scsi_state));
5602 
5603 	pkt = CMD2PKT(cmd);
5604 	*(pkt->pkt_scbp) = scsi_status;
5605 
5606 	if (loginfo == 0x31170000) {
5607 		/*
5608 		 * If loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5609 		 * (0x31170000) is reported, the device missing delay is in
5610 		 * progress and the command needs to be retried later.
5611 		 */
5612 		*(pkt->pkt_scbp) = STATUS_BUSY;
5613 		return;
5614 	}
5615 
5616 	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5617 	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5618 	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5619 		pkt->pkt_reason = CMD_INCOMPLETE;
5620 		pkt->pkt_state |= STATE_GOT_BUS;
5621 		if (ptgt->m_reset_delay == 0) {
5622 			mptsas_set_throttle(mpt, ptgt,
5623 			    DRAIN_THROTTLE);
5624 		}
5625 		return;
5626 	}
5627 
5628 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5629 		responsedata &= 0x000000FF;
5630 		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5631 			mptsas_log(mpt, CE_NOTE, "TLR is not supported\n");
5632 			pkt->pkt_reason = CMD_TLR_OFF;
5633 			return;
5634 		}
5635 	}
5636 
5637 
5638 	switch (scsi_status) {
5639 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
5640 		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5641 		arqstat = (void*)(pkt->pkt_scbp);
5642 		arqstat->sts_rqpkt_status = *((struct scsi_status *)
5643 		    (pkt->pkt_scbp));
5644 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5645 		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5646 		if (cmd->cmd_flags & CFLAG_XARQ) {
5647 			pkt->pkt_state |= STATE_XARQ_DONE;
5648 		}
5649 		if (pkt->pkt_resid != cmd->cmd_dmacount) {
5650 			pkt->pkt_state |= STATE_XFERRED_DATA;
5651 		}
5652 		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5653 		arqstat->sts_rqpkt_state  = pkt->pkt_state;
5654 		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5655 		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5656 		sensedata = (uint8_t *)&arqstat->sts_sensedata;
5657 		cmd_rqs_len = cmd->cmd_extrqslen ?
5658 		    cmd->cmd_extrqslen : cmd->cmd_rqslen;
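		/*
		 * cmd_rqs_len is the size of the request-sense buffer that
		 * was allocated for this command: the extended buffer if
		 * extra sense space was requested, otherwise the default
		 * per-command sense buffer.
		 */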
5659 		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
5660 		    DDI_DMA_SYNC_FORKERNEL);
5661 #ifdef MPTSAS_DEBUG
5662 		bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
5663 		    ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
5664 		    sizeof (mptsas_last_sense):cmd_rqs_len));
5665 #endif
5666 		bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
5667 		    ((cmd_rqs_len >= sensecount) ? sensecount :
5668 		    cmd_rqs_len));
5669 		arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
5670 		cmd->cmd_flags |= CFLAG_CMDARQ;
5671 		/*
5672 		 * Set proper status for pkt if autosense was valid
5673 		 */
5674 		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5675 			struct scsi_status zero_status = { 0 };
5676 			arqstat->sts_rqpkt_status = zero_status;
5677 		}
5678 
5679 		/*
5680 		 * ASC=0x47 is parity error
5681 		 * ASC=0x48 is initiator detected error received
5682 		 */
5683 		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5684 		    ((scsi_sense_asc(sensedata) == 0x47) ||
5685 		    (scsi_sense_asc(sensedata) == 0x48))) {
5686 			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5687 		}
5688 
5689 		/*
5690 		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5691 		 * ASC/ASCQ=0x25/0x00 means invalid lun
5692 		 */
5693 		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5694 		    (scsi_sense_asc(sensedata) == 0x3F) &&
5695 		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
5696 		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5697 		    (scsi_sense_asc(sensedata) == 0x25) &&
5698 		    (scsi_sense_ascq(sensedata) == 0x00))) {
5699 			mptsas_topo_change_list_t *topo_node = NULL;
5700 
5701 			topo_node = kmem_zalloc(
5702 			    sizeof (mptsas_topo_change_list_t),
5703 			    KM_NOSLEEP);
5704 			if (topo_node == NULL) {
5705 				mptsas_log(mpt, CE_NOTE, "No memory "
5706 				    "resource for handling SAS dynamic "
5707 				    "reconfigure.\n");
5708 				break;
5709 			}
5710 			topo_node->mpt = mpt;
5711 			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5712 			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5713 			topo_node->devhdl = ptgt->m_devhdl;
5714 			topo_node->object = (void *)ptgt;
5715 			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5716 
5717 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5718 			    mptsas_handle_dr,
5719 			    (void *)topo_node,
5720 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
5721 				kmem_free(topo_node,
5722 				    sizeof (mptsas_topo_change_list_t));
5723 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
5724 				    "for handling SAS dynamic reconfigure "
5725 				    "failed.\n");
5726 			}
5727 		}
5728 		break;
5729 	case MPI2_SCSI_STATUS_GOOD:
5730 		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5731 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5732 			pkt->pkt_reason = CMD_DEV_GONE;
5733 			pkt->pkt_state |= STATE_GOT_BUS;
5734 			if (ptgt->m_reset_delay == 0) {
5735 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5736 			}
5737 			NDBG31(("lost disk for target%d, command:%x",
5738 			    Tgt(cmd), pkt->pkt_cdbp[0]));
5739 			break;
5740 		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5741 			NDBG31(("data overrun: xferred=%d", xferred));
5742 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5743 			pkt->pkt_reason = CMD_DATA_OVR;
5744 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5745 			    | STATE_SENT_CMD | STATE_GOT_STATUS
5746 			    | STATE_XFERRED_DATA);
5747 			pkt->pkt_resid = 0;
5748 			break;
5749 		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5750 		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5751 			NDBG31(("data underrun: xferred=%d", xferred));
5752 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5753 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5754 			    | STATE_SENT_CMD | STATE_GOT_STATUS);
5755 			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5756 			if (pkt->pkt_resid != cmd->cmd_dmacount) {
5757 				pkt->pkt_state |= STATE_XFERRED_DATA;
5758 			}
5759 			break;
5760 		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5761 			if (cmd->cmd_active_expiration <= gethrtime()) {
5762 				/*
5763 				 * When timeout requested, propagate
5764 				 * proper reason and statistics to
5765 				 * target drivers.
5766 				 */
5767 				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
5768 				    STAT_BUS_RESET | STAT_TIMEOUT);
5769 			} else {
5770 				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
5771 				    STAT_BUS_RESET);
5772 			}
5773 			break;
5774 		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5775 		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5776 			mptsas_set_pkt_reason(mpt,
5777 			    cmd, CMD_RESET, STAT_DEV_RESET);
5778 			break;
5779 		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5780 		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5781 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5782 			mptsas_set_pkt_reason(mpt,
5783 			    cmd, CMD_TERMINATED, STAT_TERMINATED);
5784 			break;
5785 		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5786 		case MPI2_IOCSTATUS_BUSY:
5787 			/*
5788 			 * set throttles to drain
5789 			 */
5790 			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5791 			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
5792 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5793 			}
5794 
5795 			/*
5796 			 * retry command
5797 			 */
5798 			cmd->cmd_flags |= CFLAG_RETRY;
5799 			cmd->cmd_pkt_flags |= FLAG_HEAD;
5800 
5801 			(void) mptsas_accept_pkt(mpt, cmd);
5802 			break;
5803 		default:
5804 			mptsas_log(mpt, CE_WARN,
5805 			    "unknown ioc_status = %x\n", ioc_status);
5806 			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5807 			    "count = %x, scsi_status = %x", scsi_state,
5808 			    xferred, scsi_status);
5809 			break;
5810 		}
5811 		break;
5812 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5813 		mptsas_handle_qfull(mpt, cmd);
5814 		break;
5815 	case MPI2_SCSI_STATUS_BUSY:
5816 		NDBG31(("scsi_status busy received"));
5817 		break;
5818 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5819 		NDBG31(("scsi_status reservation conflict received"));
5820 		break;
5821 	default:
5822 		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5823 		    scsi_status, ioc_status);
5824 		mptsas_log(mpt, CE_WARN,
5825 		    "mptsas_process_intr: invalid scsi status\n");
5826 		break;
5827 	}
5828 }
5829 
5830 static void
5831 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5832     mptsas_cmd_t *cmd)
5833 {
5834 	uint8_t		task_type;
5835 	uint16_t	ioc_status;
5836 	uint32_t	log_info;
5837 	uint16_t	dev_handle;
5838 	struct scsi_pkt *pkt = CMD2PKT(cmd);
5839 
5840 	task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5841 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5842 	log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5843 	dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5844 
5845 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5846 		mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5847 		    "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5848 		    task_type, ioc_status, log_info, dev_handle);
5849 		pkt->pkt_reason = CMD_INCOMPLETE;
5850 		return;
5851 	}
5852 
5853 	switch (task_type) {
5854 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5855 	case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5856 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5857 	case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5858 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5859 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5860 		break;
5861 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5862 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5863 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5864 		/*
5865 		 * Check for invalid DevHandle of 0 in case application
5866 		 * sends bad command.  DevHandle of 0 could cause problems.
5867 		 */
5868 		if (dev_handle == 0) {
5869 			mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5870 			    " DevHandle of 0.");
5871 		} else {
5872 			mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5873 			    task_type);
5874 		}
5875 		break;
5876 	default:
5877 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5878 		    task_type);
5879 		mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5880 		break;
5881 	}
5882 }
5883 
5884 static void
5885 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5886 {
5887 	mptsas_t			*mpt = arg->mpt;
5888 	uint64_t			t = arg->t;
5889 	mptsas_cmd_t			*cmd;
5890 	struct scsi_pkt			*pkt;
5891 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
5892 
5893 	mutex_enter(&item->mutex);
5894 	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
5895 		if (!item->doneq) {
5896 			cv_wait(&item->cv, &item->mutex);
5897 		}
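		/*
		 * Pull one completed command off this helper's queue while
		 * still holding the list mutex, then drop the mutex before
		 * calling the target driver's completion routine, which may
		 * re-enter the HBA.
		 */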
5898 		pkt = NULL;
5899 		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5900 			cmd->cmd_flags |= CFLAG_COMPLETED;
5901 			pkt = CMD2PKT(cmd);
5902 		}
5903 		mutex_exit(&item->mutex);
5904 		if (pkt) {
5905 			mptsas_pkt_comp(pkt, cmd);
5906 		}
5907 		mutex_enter(&item->mutex);
5908 	}
5909 	mutex_exit(&item->mutex);
5910 	mutex_enter(&mpt->m_doneq_mutex);
5911 	mpt->m_doneq_thread_n--;
5912 	cv_broadcast(&mpt->m_doneq_thread_cv);
5913 	mutex_exit(&mpt->m_doneq_mutex);
5914 }
5915 
5916 
5917 /*
5918  * mpt interrupt handler.
5919  */
5920 static uint_t
5921 mptsas_intr(caddr_t arg1, caddr_t arg2)
5922 {
5923 	mptsas_t			*mpt = (void *)arg1;
5924 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
5925 	uchar_t				did_reply = FALSE;
5926 
5927 	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5928 
5929 	mutex_enter(&mpt->m_mutex);
5930 
5931 	/*
5932 	 * If interrupts are shared by two channels, then check whether this
5933 	 * interrupt is genuinely for this channel by first making sure the
5934 	 * chip is in the high power state.
5935 	 */
5936 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
5937 	    (mpt->m_power_level != PM_LEVEL_D0)) {
5938 		mutex_exit(&mpt->m_mutex);
5939 		return (DDI_INTR_UNCLAIMED);
5940 	}
5941 
5942 	/*
5943 	 * If polling, interrupt was triggered by some shared interrupt because
5944 	 * IOC interrupts are disabled during polling, so polling routine will
5945 	 * handle any replies.  Considering this, if polling is happening,
5946 	 * return with interrupt unclaimed.
5947 	 */
5948 	if (mpt->m_polled_intr) {
5949 		mutex_exit(&mpt->m_mutex);
5950 		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5951 		return (DDI_INTR_UNCLAIMED);
5952 	}
5953 
5954 	/*
5955 	 * Read the istat register.
5956 	 */
5957 	if ((INTPENDING(mpt)) != 0) {
5958 		/*
5959 		 * read fifo until empty.
5960 		 */
5961 #ifndef __lock_lint
5962 		_NOTE(CONSTCOND)
5963 #endif
5964 		while (TRUE) {
5965 			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5966 			    DDI_DMA_SYNC_FORCPU);
5967 			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5968 			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5969 
5970 			if (ddi_get32(mpt->m_acc_post_queue_hdl,
5971 			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5972 			    ddi_get32(mpt->m_acc_post_queue_hdl,
5973 			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5974 				break;
5975 			}
5976 
5977 			/*
5978 			 * The reply is valid, process it according to its
5979 			 * type.  Also, set a flag for updating the reply index
5980 			 * after they've all been processed.
5981 			 */
5982 			did_reply = TRUE;
5983 
5984 			mptsas_process_intr(mpt, reply_desc_union);
5985 
5986 			/*
5987 			 * Increment post index and roll over if needed.
5988 			 */
5989 			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5990 				mpt->m_post_index = 0;
5991 			}
5992 		}
5993 
5994 		/*
5995 		 * Update the global reply index if at least one reply was
5996 		 * processed.
5997 		 */
5998 		if (did_reply) {
5999 			ddi_put32(mpt->m_datap,
6000 			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
6001 		}
6002 	} else {
6003 		mutex_exit(&mpt->m_mutex);
6004 		return (DDI_INTR_UNCLAIMED);
6005 	}
6006 	NDBG1(("mptsas_intr complete"));
6007 
6008 	/*
6009 	 * If no helper threads are created, process the doneq in ISR. If
6010 	 * helpers are created, use the doneq length as a metric to measure the
6011 	 * load on the interrupt CPU. If it is long enough, which indicates the
6012 	 * load is heavy, then we deliver the IO completions to the helpers.
6013 	 * This measurement has some limitations, although it is simple and
6014 	 * straightforward and works well for most of the cases at present.
6015 	 */
6016 	if (!mpt->m_doneq_thread_n ||
6017 	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
6018 		mptsas_doneq_empty(mpt);
6019 	} else {
6020 		mptsas_deliver_doneq_thread(mpt);
6021 	}
6022 
6023 	/*
6024 	 * If there are queued cmd, start them now.
6025 	 */
6026 	if (mpt->m_waitq != NULL) {
6027 		mptsas_restart_waitq(mpt);
6028 	}
6029 
6030 	mutex_exit(&mpt->m_mutex);
6031 	return (DDI_INTR_CLAIMED);
6032 }
6033 
6034 static void
6035 mptsas_process_intr(mptsas_t *mpt,
6036     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
6037 {
6038 	uint8_t	reply_type;
6039 
6040 	ASSERT(mutex_owned(&mpt->m_mutex));
6041 
6042 	/*
6043 	 * The reply is valid, process it according to its
6044 	 * type.  The caller is responsible for updating the reply
6045 	 * index after all replies have been processed.
6046 	 */
6047 	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
6048 	    &reply_desc_union->Default.ReplyFlags);
6049 	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
6050 	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
6051 	    reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
6052 		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
6053 	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
6054 		mptsas_handle_address_reply(mpt, reply_desc_union);
6055 	} else {
6056 		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
6057 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
6058 	}
6059 
6060 	/*
6061 	 * Clear the reply descriptor for re-use and increment
6062 	 * index.
6063 	 */
6064 	ddi_put64(mpt->m_acc_post_queue_hdl,
6065 	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
6066 	    0xFFFFFFFFFFFFFFFF);
6067 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
6068 	    DDI_DMA_SYNC_FORDEV);
6069 }
6070 
6071 /*
6072  * handle qfull condition
6073  */
6074 static void
6075 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
6076 {
6077 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
6078 
6079 	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
6080 	    (ptgt->m_qfull_retries == 0)) {
6081 		/*
6082 		 * We have exhausted the retries on QFULL, or,
6083 		 * the target driver has indicated that it
6084 		 * wants to handle QFULL itself by setting
6085 		 * qfull-retries capability to 0. In either case
6086 		 * we want the target driver's QFULL handling
6087 		 * to kick in. We do this by having pkt_reason
6088 		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
6089 		 */
6090 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
6091 	} else {
6092 		if (ptgt->m_reset_delay == 0) {
6093 			ptgt->m_t_throttle =
6094 			    max((ptgt->m_t_ncmds - 2), 0);
6095 		}
6096 
6097 		cmd->cmd_pkt_flags |= FLAG_HEAD;
6098 		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
6099 		cmd->cmd_flags |= CFLAG_RETRY;
6100 
6101 		(void) mptsas_accept_pkt(mpt, cmd);
6102 
6103 		/*
6104 		 * When the target gives queue full status with no commands
6105 		 * outstanding (m_t_ncmds == 0), throttle is set to 0
6106 		 * (HOLD_THROTTLE), and the queue full handling starts
6107 		 * (see psarc/1994/313); if there are commands outstanding,
6108 		 * throttle is set to (m_t_ncmds - 2).
6109 		 */
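		/*
		 * A hypothetical worked example of the throttle math above:
		 * with 10 commands outstanding when QFULL is returned, the
		 * throttle drops to 8 (m_t_ncmds - 2); with none outstanding
		 * it stays at HOLD_THROTTLE (0) and is switched to
		 * QFULL_THROTTLE below so mptsas_restart_cmd() can clear it
		 * once the qfull retry interval expires.
		 */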
6110 		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
6111 			/*
6112 			 * By setting throttle to QFULL_THROTTLE, we
6113 			 * avoid submitting new commands and in
6114 			 * mptsas_restart_cmd find out slots which need
6115 			 * their throttles to be cleared.
6116 			 */
6117 			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
6118 			if (mpt->m_restart_cmd_timeid == 0) {
6119 				mpt->m_restart_cmd_timeid =
6120 				    timeout(mptsas_restart_cmd, mpt,
6121 				    ptgt->m_qfull_retry_interval);
6122 			}
6123 		}
6124 	}
6125 }
6126 
6127 mptsas_phymask_t
6128 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
6129 {
6130 	mptsas_phymask_t	phy_mask = 0;
6131 	uint8_t			i = 0;
6132 
6133 	NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
6134 
6135 	ASSERT(mutex_owned(&mpt->m_mutex));
6136 
6137 	/*
6138 	 * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
6139 	 */
6140 	if (physport == 0xFF) {
6141 		return (0);
6142 	}
6143 
6144 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6145 		if (mpt->m_phy_info[i].attached_devhdl &&
6146 		    (mpt->m_phy_info[i].phy_mask != 0) &&
6147 		    (mpt->m_phy_info[i].port_num == physport)) {
6148 			phy_mask = mpt->m_phy_info[i].phy_mask;
6149 			break;
6150 		}
6151 	}
6152 	NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6153 	    mpt->m_instance, physport, phy_mask));
6154 	return (phy_mask);
6155 }
6156 
6157 /*
6158  * mpt free device handle after device gone, by use of passthrough
6159  */
6160 static int
6161 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6162 {
6163 	Mpi2SasIoUnitControlRequest_t	req;
6164 	Mpi2SasIoUnitControlReply_t	rep;
6165 	int				ret;
6166 
6167 	ASSERT(mutex_owned(&mpt->m_mutex));
6168 
6169 	/*
6170 	 * Need to compose a SAS IO Unit Control request message
6171 	 * and call mptsas_do_passthru() function
6172 	 */
6173 	bzero(&req, sizeof (req));
6174 	bzero(&rep, sizeof (rep));
6175 
6176 	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6177 	req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6178 	req.DevHandle = LE_16(devhdl);
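	/*
	 * The IOC interprets the message in little-endian byte order, so
	 * the device handle is converted with LE_16() before the request
	 * is handed to mptsas_do_passthru().
	 */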
6179 
6180 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6181 	    sizeof (req), sizeof (rep), 0, MPTSAS_PASS_THRU_DIRECTION_NONE,
6182 	    NULL, 0, 60, FKIOCTL);
6183 	if (ret != 0) {
6184 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6185 		    "Control error %d", ret);
6186 		return (DDI_FAILURE);
6187 	}
6188 
6189 	/* do passthrough success, check the ioc status */
6190 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6191 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6192 		    "Control IOCStatus %d", LE_16(rep.IOCStatus));
6193 		return (DDI_FAILURE);
6194 	}
6195 
6196 	return (DDI_SUCCESS);
6197 }
6198 
6199 /*
6200  * We have a SATA target that has changed, which means the "bridge-port"
6201  * property must be updated to reflect the SAS WWN of the new attachment point.
6202  * This may change if a SATA device changes which bay, and therefore phy, it is
6203  * plugged into. This SATA device may be a multipath virtual device or may be a
6204  * physical device. We have to handle both cases.
6205  */
6206 static boolean_t
6207 mptsas_update_sata_bridge(mptsas_t *mpt, dev_info_t *parent,
6208     mptsas_target_t *ptgt)
6209 {
6210 	int			rval;
6211 	uint16_t		dev_hdl;
6212 	uint16_t		pdev_hdl;
6213 	uint64_t		dev_sas_wwn;
6214 	uint8_t			physport;
6215 	uint8_t			phy_id;
6216 	uint32_t		page_address;
6217 	uint16_t		bay_num, enclosure, io_flags;
6218 	uint32_t		dev_info;
6219 	char			uabuf[SCSI_WWN_BUFLEN];
6220 	dev_info_t		*dip;
6221 	mdi_pathinfo_t		*pip;
6222 
6223 	mutex_enter(&mpt->m_mutex);
6224 	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6225 	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)ptgt->m_devhdl;
6226 	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
6227 	    &dev_sas_wwn, &dev_info, &physport, &phy_id, &pdev_hdl, &bay_num,
6228 	    &enclosure, &io_flags);
6229 	mutex_exit(&mpt->m_mutex);
6230 	if (rval != DDI_SUCCESS) {
6231 		mptsas_log(mpt, CE_WARN, "unable to get SAS device page 0 "
6232 		    "for page address 0x%x", page_address);
6233 		return (B_FALSE);
6234 	}
6235 
6236 	if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
6237 		mptsas_log(mpt, CE_WARN,
6238 		    "mptsas unable to format SATA bridge WWN");
6239 		return (B_FALSE);
6240 	}
6241 
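	/*
	 * uabuf now holds the unit-address form of the bridge SAS WWN, e.g.
	 * (hypothetically) "w5000c500a1b2c3d4".  Publish it as the
	 * bridge-port property on either the pathinfo node (MPxIO case) or
	 * the child devinfo node (non-MPxIO case).
	 */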
6242 	if (mpt->m_mpxio_enable == TRUE && (pip = mptsas_find_path_addr(parent,
6243 	    ptgt->m_addr.mta_wwn, 0)) != NULL) {
6244 		if (mdi_prop_update_string(pip, SCSI_ADDR_PROP_BRIDGE_PORT,
6245 		    uabuf) != DDI_SUCCESS) {
6246 			mptsas_log(mpt, CE_WARN,
6247 			    "mptsas unable to create SCSI bridge port "
6248 			    "property for SATA device");
6249 			return (B_FALSE);
6250 		}
6251 		return (B_TRUE);
6252 	}
6253 
6254 	if ((dip = mptsas_find_child_addr(parent, ptgt->m_addr.mta_wwn,
6255 	    0)) != NULL) {
6256 		if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
6257 		    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) != DDI_PROP_SUCCESS) {
6258 			mptsas_log(mpt, CE_WARN,
6259 			    "mptsas unable to create SCSI bridge port "
6260 			    "property for SATA device");
6261 			return (B_FALSE);
6262 		}
6263 		return (B_TRUE);
6264 	}
6265 
6266 	mptsas_log(mpt, CE_WARN, "mptsas failed to find dev_info_t or "
6267 	    "mdi_pathinfo_t for target with WWN %016" PRIx64,
6268 	    ptgt->m_addr.mta_wwn);
6269 
6270 	return (B_FALSE);
6271 }
6272 
6273 static void
6274 mptsas_update_phymask(mptsas_t *mpt)
6275 {
6276 	mptsas_phymask_t mask = 0, phy_mask;
6277 	char		*phy_mask_name;
6278 	uint8_t		current_port;
6279 	int		i, j;
6280 
6281 	NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6282 
6283 	ASSERT(mutex_owned(&mpt->m_mutex));
6284 
6285 	(void) mptsas_get_sas_io_unit_page(mpt);
6286 
6287 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6288 
6289 	for (i = 0; i < mpt->m_num_phys; i++) {
6290 		phy_mask = 0x00;
6291 
6292 		if (mpt->m_phy_info[i].attached_devhdl == 0)
6293 			continue;
6294 
6295 		bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6296 
6297 		current_port = mpt->m_phy_info[i].port_num;
6298 
6299 		if ((mask & (1 << i)) != 0)
6300 			continue;
6301 
6302 		for (j = 0; j < mpt->m_num_phys; j++) {
6303 			if (mpt->m_phy_info[j].attached_devhdl &&
6304 			    (mpt->m_phy_info[j].port_num == current_port)) {
6305 				phy_mask |= (1 << j);
6306 			}
6307 		}
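		/*
		 * phy_mask now has one bit set for every PHY that belongs to
		 * the same port as PHY i.  As an illustration (hypothetical
		 * wiring), PHYs 4-7 cabled as a single wide port would yield
		 * a phy_mask of 0xf0, which also becomes the iport name
		 * registered below.
		 */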
6308 		mask = mask | phy_mask;
6309 
6310 		for (j = 0; j < mpt->m_num_phys; j++) {
6311 			if ((phy_mask >> j) & 0x01) {
6312 				mpt->m_phy_info[j].phy_mask = phy_mask;
6313 			}
6314 		}
6315 
6316 		(void) sprintf(phy_mask_name, "%x", phy_mask);
6317 
6318 		mutex_exit(&mpt->m_mutex);
6319 		/*
6320 		 * Register an iport; if the port already exists, SCSA
6321 		 * will do nothing and just return.
6322 		 */
6323 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6324 		mutex_enter(&mpt->m_mutex);
6325 	}
6326 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6327 	NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6328 }
6329 
6330 /*
6331  * mptsas_handle_dr is a task handler for DR, the DR action includes:
6332  * 1. Directly attached Device Added/Removed.
6333  * 2. Expander Device Added/Removed.
6334  * 3. Indirectly Attached Device Added/Removed.
6335  * 4. Status change of LUNs of an existing device.
6336  * 5. RAID volume created/deleted.
6337  * 6. Member of RAID volume is released because of RAID deletion.
6338  * 7. Physical disks are removed because of RAID creation.
6339  */
6340 static void
6341 mptsas_handle_dr(void *args)
6342 {
6343 	mptsas_topo_change_list_t	*topo_node = NULL;
6344 	mptsas_topo_change_list_t	*save_node = NULL;
6345 	mptsas_t			*mpt;
6346 	dev_info_t			*parent = NULL;
6347 	mptsas_phymask_t		phymask = 0;
6348 	char				*phy_mask_name;
6349 	uint8_t				flags = 0, physport = 0xff;
6350 	uint8_t				port_update = 0;
6351 	uint_t				event;
6352 
6353 	topo_node = (mptsas_topo_change_list_t *)args;
6354 
6355 	mpt = topo_node->mpt;
6356 	event = topo_node->event;
6357 	flags = topo_node->flags;
6358 
6359 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6360 
6361 	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6362 
6363 	switch (event) {
6364 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6365 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6366 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6367 		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6368 			/*
6369 			 * Direct attached or expander attached device added
6370 			 * into system or a Phys Disk that is being unhidden.
6371 			 */
6372 			port_update = 1;
6373 		}
6374 		break;
6375 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
6376 		/*
6377 		 * New expander added into system, it must be the head
6378 		 * of topo_change_list_t
6379 		 */
6380 		port_update = 1;
6381 		break;
6382 	default:
6383 		port_update = 0;
6384 		break;
6385 	}
6386 	/*
6387 	 * Any case with port_update == 1 may cause the initiator port form to change.
6388 	 */
6389 	mutex_enter(&mpt->m_mutex);
6390 	if (mpt->m_port_chng && port_update) {
6391 		/*
6392 		 * The mpt->m_port_chng flag indicates some PHYs of the
6393 		 * initiator port have come online. So when an expander-added
6394 		 * or directly-attached-device-online event arrives, we force
6395 		 * an update of the port information by issuing a SAS IO Unit
6396 		 * Page request and updating the PHYMASKs.
6397 		 */
6398 		(void) mptsas_update_phymask(mpt);
6399 		mpt->m_port_chng = 0;
6400 
6401 	}
6402 	mutex_exit(&mpt->m_mutex);
6403 	while (topo_node) {
6404 		phymask = 0;
6405 		if (parent == NULL) {
6406 			physport = topo_node->un.physport;
6407 			event = topo_node->event;
6408 			flags = topo_node->flags;
6409 			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6410 			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6411 				/*
6412 				 * For all offline events, phymask is known
6413 				 */
6414 				phymask = topo_node->un.phymask;
6415 				goto find_parent;
6416 			}
6417 			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6418 				goto handle_topo_change;
6419 			}
6420 			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6421 				phymask = topo_node->un.phymask;
6422 				goto find_parent;
6423 			}
6424 
6425 			if ((flags ==
6426 			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6427 			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6428 				/*
6429 				 * No field in the IR_CONFIG_CHANGE event
6430 				 * indicates physport/phynum, so get the
6431 				 * parent after the SAS Device Page0 request.
6432 				 */
6433 				goto handle_topo_change;
6434 			}
6435 
6436 			mutex_enter(&mpt->m_mutex);
6437 			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6438 				/*
6439 				 * If a direct attached device was added or a
6440 				 * phys disk is being unhidden, the physport
6441 				 * argument is actually the PHY#, so we have to
6442 				 * get the phymask according to the PHY#.
6443 				 */
6444 				physport = mpt->m_phy_info[physport].port_num;
6445 			}
6446 
6447 			/*
6448 			 * Translate physport to phymask so that we can search
6449 			 * parent dip.
6450 			 */
6451 			phymask = mptsas_physport_to_phymask(mpt,
6452 			    physport);
6453 			mutex_exit(&mpt->m_mutex);
6454 
6455 find_parent:
6456 			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6457 			/*
6458 			 * For RAID topology change node, write the iport name
6459 			 * as v0.
6460 			 */
6461 			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6462 				(void) sprintf(phy_mask_name, "v0");
6463 			} else {
6464 				/*
6465 				 * phymask can be 0 if the drive has been
6466 				 * pulled by the time an add event is
6467 				 * processed.  If phymask is 0, just skip this
6468 				 * event and continue.
6469 				 */
6470 				if (phymask == 0) {
6471 					mutex_enter(&mpt->m_mutex);
6472 					save_node = topo_node;
6473 					topo_node = topo_node->next;
6474 					ASSERT(save_node);
6475 					kmem_free(save_node,
6476 					    sizeof (mptsas_topo_change_list_t));
6477 					mutex_exit(&mpt->m_mutex);
6478 
6479 					parent = NULL;
6480 					continue;
6481 				}
6482 				(void) sprintf(phy_mask_name, "%x", phymask);
6483 			}
6484 			parent = scsi_hba_iport_find(mpt->m_dip,
6485 			    phy_mask_name);
6486 			if (parent == NULL) {
6487 				mptsas_log(mpt, CE_WARN, "Failed to find an "
6488 				    "iport, should not happen!");
6489 				goto out;
6490 			}
6491 
6492 		}
6493 		ASSERT(parent);
6494 handle_topo_change:
6495 
6496 		mutex_enter(&mpt->m_mutex);
6497 		/*
6498 		 * If HBA is being reset, don't perform operations depending
6499 		 * on the IOC. We must free the topo list, however.
6500 		 */
6501 		if (!mpt->m_in_reset) {
6502 			mptsas_handle_topo_change(topo_node, parent);
6503 		} else {
6504 			NDBG20(("skipping topo change received during reset"));
6505 		}
6506 		save_node = topo_node;
6507 		topo_node = topo_node->next;
6508 		ASSERT(save_node);
6509 		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6510 		mutex_exit(&mpt->m_mutex);
6511 
6512 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6513 		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6514 		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6515 			/*
6516 			 * If a direct attached device is associated, make sure
6517 			 * to reset the parent before starting the next one;
6518 			 * all devices associated with an expander share the
6519 			 * parent.  Also, reset parent if this is for RAID.
6520 			 */
6521 			parent = NULL;
6522 		}
6523 	}
6524 out:
6525 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6526 }
6527 
6528 static void
6529 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6530     dev_info_t *parent)
6531 {
6532 	mptsas_target_t	*ptgt = NULL;
6533 	mptsas_smp_t	*psmp = NULL;
6534 	mptsas_t	*mpt = (void *)topo_node->mpt;
6535 	uint16_t	devhdl;
6536 	uint16_t	attached_devhdl;
6537 	uint64_t	sas_wwn = 0;
6538 	int		rval = 0;
6539 	uint32_t	page_address;
6540 	uint8_t		phy, flags;
6541 	char		*addr = NULL;
6542 	dev_info_t	*lundip;
6543 	char		attached_wwnstr[MPTSAS_WWN_STRLEN];
6544 
6545 	NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
6546 	    "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
6547 	    topo_node->event, topo_node->flags));
6548 
6549 	ASSERT(mutex_owned(&mpt->m_mutex));
6550 
6551 	switch (topo_node->event) {
6552 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6553 	{
6554 		char *phy_mask_name;
6555 		mptsas_phymask_t phymask = 0;
6556 
6557 		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6558 			/*
6559 			 * Get latest RAID info.
6560 			 */
6561 			(void) mptsas_get_raid_info(mpt);
6562 			ptgt = refhash_linear_search(mpt->m_targets,
6563 			    mptsas_target_eval_devhdl, &topo_node->devhdl);
6564 			if (ptgt == NULL)
6565 				break;
6566 		} else {
6567 			ptgt = (void *)topo_node->object;
6568 		}
6569 
6570 		if (ptgt == NULL) {
6571 			/*
6572 			 * If a Phys Disk was deleted, RAID info needs to be
6573 			 * updated to reflect the new topology.
6574 			 */
6575 			(void) mptsas_get_raid_info(mpt);
6576 
6577 			/*
6578 			 * Get sas device page 0 by DevHandle to make sure if
6579 			 * SSP/SATA end device exist.
6580 			 */
6581 			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6582 			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6583 			    topo_node->devhdl;
6584 
6585 			rval = mptsas_get_target_device_info(mpt, page_address,
6586 			    &devhdl, &ptgt);
6587 			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6588 				mptsas_log(mpt, CE_NOTE,
6589 				    "mptsas_handle_topo_change: target %d is "
6590 				    "not a SAS/SATA device. \n",
6591 				    topo_node->devhdl);
6592 			} else if (rval == DEV_INFO_FAIL_ALLOC) {
6593 				mptsas_log(mpt, CE_NOTE,
6594 				    "mptsas_handle_topo_change: could not "
6595 				    "allocate memory. \n");
6596 			} else if (rval == DEV_INFO_FAIL_GUID) {
6597 				mptsas_log(mpt, CE_NOTE,
6598 				    "mptsas_handle_topo_change: could not "
6599 				    "get SATA GUID for target %d. \n",
6600 				    topo_node->devhdl);
6601 			}
6602 			/*
6603 			 * If rval is DEV_INFO_PHYS_DISK or indicates failure
6604 			 * then there is nothing else to do, just leave.
6605 			 */
6606 			if (rval != DEV_INFO_SUCCESS) {
6607 				return;
6608 			}
6609 		}
6610 
6611 		ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6612 
6613 		mutex_exit(&mpt->m_mutex);
6614 		flags = topo_node->flags;
6615 
6616 		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6617 			phymask = ptgt->m_addr.mta_phymask;
6618 			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6619 			(void) sprintf(phy_mask_name, "%x", phymask);
6620 			parent = scsi_hba_iport_find(mpt->m_dip,
6621 			    phy_mask_name);
6622 			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6623 			if (parent == NULL) {
6624 				mptsas_log(mpt, CE_WARN, "Failed to find an "
6625 				    "iport for PD, should not happen!");
6626 				mutex_enter(&mpt->m_mutex);
6627 				break;
6628 			}
6629 		}
6630 
6631 		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6632 			ndi_devi_enter(parent);
6633 			(void) mptsas_config_raid(parent, topo_node->devhdl,
6634 			    &lundip);
6635 			ndi_devi_exit(parent);
6636 		} else {
6637 			/*
6638 			 * hold nexus for bus configure
6639 			 */
6640 			ndi_devi_enter(scsi_vhci_dip);
6641 			ndi_devi_enter(parent);
6642 			rval = mptsas_config_target(parent, ptgt);
6643 			/*
6644 			 * release nexus for bus configure
6645 			 */
6646 			ndi_devi_exit(parent);
6647 			ndi_devi_exit(scsi_vhci_dip);
6648 
6649 			/*
6650 			 * If this is a SATA device, make sure that the
6651 			 * bridge-port (the SAS WWN that the SATA device is
6652 			 * plugged into) is updated. This may change if a SATA
6653 			 * device changes which bay, and therefore phy, it is
6654 			 * plugged into.
6655 			 */
6656 			if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
6657 				if (!mptsas_update_sata_bridge(mpt, parent,
6658 				    ptgt)) {
6659 					mutex_enter(&mpt->m_mutex);
6660 					return;
6661 				}
6662 			}
6663 
6664 			/*
6665 			 * Add parent's props for SMHBA support
6666 			 */
6667 			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6668 				bzero(attached_wwnstr,
6669 				    sizeof (attached_wwnstr));
6670 				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
6671 				    ptgt->m_addr.mta_wwn);
6672 				if (ddi_prop_update_string(DDI_DEV_T_NONE,
6673 				    parent,
6674 				    SCSI_ADDR_PROP_ATTACHED_PORT,
6675 				    attached_wwnstr)
6676 				    != DDI_PROP_SUCCESS) {
6677 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6678 					    parent,
6679 					    SCSI_ADDR_PROP_ATTACHED_PORT);
6680 					mptsas_log(mpt, CE_WARN, "Failed to"
6681 					    " create attached-port props");
6682 					mutex_enter(&mpt->m_mutex);
6683 					return;
6684 				}
6685 				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6686 				    MPTSAS_NUM_PHYS, 1) !=
6687 				    DDI_PROP_SUCCESS) {
6688 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6689 					    parent, MPTSAS_NUM_PHYS);
6690 					mptsas_log(mpt, CE_WARN, "Failed to"
6691 					    " create num-phys props");
6692 					mutex_enter(&mpt->m_mutex);
6693 					return;
6694 				}
6695 
6696 				/*
6697 				 * Update PHY info for smhba
6698 				 */
6699 				mutex_enter(&mpt->m_mutex);
6700 				if (mptsas_smhba_phy_init(mpt)) {
6701 					mptsas_log(mpt, CE_WARN, "mptsas phy"
6702 					    " update failed");
6703 					return;
6704 				}
6705 				mutex_exit(&mpt->m_mutex);
6706 
6707 				/*
6708 				 * topo_node->un.physport is really the PHY#
6709 				 * for direct attached devices
6710 				 */
6711 				mptsas_smhba_set_one_phy_props(mpt, parent,
6712 				    topo_node->un.physport, &attached_devhdl);
6713 
6714 				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6715 				    MPTSAS_VIRTUAL_PORT, 0) !=
6716 				    DDI_PROP_SUCCESS) {
6717 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6718 					    parent, MPTSAS_VIRTUAL_PORT);
6719 					mptsas_log(mpt, CE_WARN,
6720 					    "mptsas virtual-port "
6721 					    "prop update failed");
6722 					mutex_enter(&mpt->m_mutex);
6723 					return;
6724 				}
6725 			}
6726 		}
6727 		mutex_enter(&mpt->m_mutex);
6728 
6729 		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6730 		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6731 		    ptgt->m_addr.mta_phymask));
6732 		break;
6733 	}
6734 	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6735 	{
6736 		devhdl = topo_node->devhdl;
6737 		ptgt = refhash_linear_search(mpt->m_targets,
6738 		    mptsas_target_eval_devhdl, &devhdl);
6739 		if (ptgt == NULL)
6740 			break;
6741 
6742 		sas_wwn = ptgt->m_addr.mta_wwn;
6743 		phy = ptgt->m_phynum;
6744 
6745 		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6746 
6747 		if (sas_wwn) {
6748 			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6749 		} else {
6750 			(void) sprintf(addr, "p%x", phy);
6751 		}
6752 		ASSERT(ptgt->m_devhdl == devhdl);
6753 
6754 		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6755 		    (topo_node->flags ==
6756 		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6757 			/*
6758 			 * Get latest RAID info if RAID volume status changes
6759 			 * or Phys Disk status changes
6760 			 */
6761 			(void) mptsas_get_raid_info(mpt);
6762 		}
6763 		/*
6764 		 * Abort all outstanding command on the device
6765 		 */
6766 		rval = mptsas_do_scsi_reset(mpt, devhdl);
6767 		if (rval) {
6768 			NDBG20(("mptsas%d handle_topo_change to reset target "
6769 			    "before offline devhdl:%x, phymask:%x, rval:%x",
6770 			    mpt->m_instance, ptgt->m_devhdl,
6771 			    ptgt->m_addr.mta_phymask, rval));
6772 		}
6773 
6774 		mutex_exit(&mpt->m_mutex);
6775 
6776 		ndi_devi_enter(scsi_vhci_dip);
6777 		ndi_devi_enter(parent);
6778 		rval = mptsas_offline_target(parent, addr);
6779 		ndi_devi_exit(parent);
6780 		ndi_devi_exit(scsi_vhci_dip);
6781 		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6782 		    "phymask:%x, rval:%x", mpt->m_instance,
6783 		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6784 
6785 		kmem_free(addr, SCSI_MAXNAMELEN);
6786 
6787 		/*
6788 		 * Clear parent's props for SMHBA support
6789 		 */
6790 		flags = topo_node->flags;
6791 		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6792 			bzero(attached_wwnstr, sizeof (attached_wwnstr));
6793 			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6794 			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6795 			    DDI_PROP_SUCCESS) {
6796 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6797 				    SCSI_ADDR_PROP_ATTACHED_PORT);
6798 				mptsas_log(mpt, CE_WARN, "mptsas attached port "
6799 				    "prop update failed");
6800 				mutex_enter(&mpt->m_mutex);
6801 				break;
6802 			}
6803 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6804 			    MPTSAS_NUM_PHYS, 0) !=
6805 			    DDI_PROP_SUCCESS) {
6806 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6807 				    MPTSAS_NUM_PHYS);
6808 				mptsas_log(mpt, CE_WARN, "mptsas num phys "
6809 				    "prop update failed");
6810 				mutex_enter(&mpt->m_mutex);
6811 				break;
6812 			}
6813 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6814 			    MPTSAS_VIRTUAL_PORT, 1) !=
6815 			    DDI_PROP_SUCCESS) {
6816 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6817 				    MPTSAS_VIRTUAL_PORT);
6818 				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6819 				    "prop update failed");
6820 				mutex_enter(&mpt->m_mutex);
6821 				break;
6822 			}
6823 		}
6824 
6825 		mutex_enter(&mpt->m_mutex);
6826 		if (rval == DDI_SUCCESS) {
6827 			refhash_remove(mpt->m_targets, ptgt);
6828 			ptgt = NULL;
6829 		} else {
6830 			/*
6831 			 * Clear the DR_INTRANSITION flag so I/O can flow down
6832 			 * to the pHCI driver now that failover has finished,
6833 			 * and invalidate the devhdl.
6834 			 */
6835 			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6836 			ptgt->m_tgt_unconfigured = 0;
6837 			mutex_enter(&mpt->m_tx_waitq_mutex);
6838 			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6839 			mutex_exit(&mpt->m_tx_waitq_mutex);
6840 		}
6841 
6842 		/*
6843 		 * Send SAS IO Unit Control to free the dev handle
6844 		 */
6845 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6846 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6847 			rval = mptsas_free_devhdl(mpt, devhdl);
6848 
6849 			NDBG20(("mptsas%d handle_topo_change to remove "
6850 			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6851 			    rval));
6852 		}
6853 
6854 		break;
6855 	}
6856 	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6857 	{
6858 		devhdl = topo_node->devhdl;
6859 		/*
6860 		 * If this is the remove handle event, do a reset first.
6861 		 */
6862 		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6863 			rval = mptsas_do_scsi_reset(mpt, devhdl);
6864 			if (rval) {
6865 				NDBG20(("mpt%d reset target before remove "
6866 				    "devhdl:%x, rval:%x", mpt->m_instance,
6867 				    devhdl, rval));
6868 			}
6869 		}
6870 
6871 		/*
6872 		 * Send SAS IO Unit Control to free the dev handle
6873 		 */
6874 		rval = mptsas_free_devhdl(mpt, devhdl);
6875 		NDBG20(("mptsas%d handle_topo_change to remove "
6876 		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6877 		    rval));
6878 		break;
6879 	}
6880 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
6881 	{
6882 		mptsas_smp_t smp;
6883 		dev_info_t *smpdip;
6884 
6885 		devhdl = topo_node->devhdl;
6886 
6887 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6888 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6889 		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6890 		if (rval != DDI_SUCCESS) {
6891 			mptsas_log(mpt, CE_WARN, "failed to online smp, "
6892 			    "handle %x", devhdl);
6893 			return;
6894 		}
6895 
6896 		psmp = mptsas_smp_alloc(mpt, &smp);
6897 		if (psmp == NULL) {
6898 			return;
6899 		}
6900 
6901 		mutex_exit(&mpt->m_mutex);
6902 		ndi_devi_enter(parent);
6903 		(void) mptsas_online_smp(parent, psmp, &smpdip);
6904 		ndi_devi_exit(parent);
6905 
6906 		mutex_enter(&mpt->m_mutex);
6907 		break;
6908 	}
6909 	case MPTSAS_DR_EVENT_OFFLINE_SMP:
6910 	{
6911 		devhdl = topo_node->devhdl;
6912 		uint32_t dev_info;
6913 
6914 		psmp = refhash_linear_search(mpt->m_smp_targets,
6915 		    mptsas_smp_eval_devhdl, &devhdl);
6916 		if (psmp == NULL)
6917 			break;
6918 		/*
6919 		 * The mptsas_smp_t data is released only if the dip is offlined
6920 		 * successfully.
6921 		 */
6922 		mutex_exit(&mpt->m_mutex);
6923 
6924 		ndi_devi_enter(parent);
6925 		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6926 		ndi_devi_exit(parent);
6927 
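		/*
		 * If the smp was directly attached to the HBA, mark the iport
		 * as virtual and clear its phy count and attached-port
		 * properties now that the smp is gone.
		 */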
6928 		dev_info = psmp->m_deviceinfo;
6929 		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6930 		    DEVINFO_DIRECT_ATTACHED) {
6931 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6932 			    MPTSAS_VIRTUAL_PORT, 1) !=
6933 			    DDI_PROP_SUCCESS) {
6934 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6935 				    MPTSAS_VIRTUAL_PORT);
6936 				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6937 				    "prop update failed");
6938 				mutex_enter(&mpt->m_mutex);
6939 				return;
6940 			}
6941 			/*
6942 			 * Clear the iport's phy count now that the smp is gone.
6943 			 */
6944 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6945 			    MPTSAS_NUM_PHYS, 0) !=
6946 			    DDI_PROP_SUCCESS) {
6947 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6948 				    MPTSAS_NUM_PHYS);
6949 				mptsas_log(mpt, CE_WARN, "mptsas num phys "
6950 				    "prop update failed");
6951 				mutex_enter(&mpt->m_mutex);
6952 				return;
6953 			}
6954 			/*
6955 			 * Clear parent's attached-port props
6956 			 */
6957 			bzero(attached_wwnstr, sizeof (attached_wwnstr));
6958 			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6959 			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6960 			    DDI_PROP_SUCCESS) {
6961 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6962 				    SCSI_ADDR_PROP_ATTACHED_PORT);
6963 				mptsas_log(mpt, CE_WARN, "mptsas attached port "
6964 				    "prop update failed");
6965 				mutex_enter(&mpt->m_mutex);
6966 				return;
6967 			}
6968 		}
6969 
6970 		mutex_enter(&mpt->m_mutex);
6971 		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6972 		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6973 		if (rval == DDI_SUCCESS) {
6974 			refhash_remove(mpt->m_smp_targets, psmp);
6975 		} else {
6976 			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6977 		}
6978 
6979 		bzero(attached_wwnstr, sizeof (attached_wwnstr));
6980 
6981 		break;
6982 	}
6983 	default:
6984 		return;
6985 	}
6986 }
6987 
6988 /*
6989  * Record the event if its type is enabled in mpt instance by ioctl.
6990  */
6991 static void
6992 mptsas_record_event(void *args)
6993 {
6994 	m_replyh_arg_t			*replyh_arg;
6995 	pMpi2EventNotificationReply_t	eventreply;
6996 	uint32_t			event, rfm;
6997 	mptsas_t			*mpt;
6998 	int				i, j;
6999 	uint16_t			event_data_len;
7000 	boolean_t			sendAEN = FALSE;
7001 
7002 	replyh_arg = (m_replyh_arg_t *)args;
7003 	rfm = replyh_arg->rfm;
7004 	mpt = replyh_arg->mpt;
7005 
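	/*
	 * rfm is the 32-bit reply frame address posted by the IOC; subtracting
	 * the low 32 bits of the reply pool's base DMA address gives the byte
	 * offset of this frame within m_reply_frame.
	 */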
7006 	eventreply = (pMpi2EventNotificationReply_t)
7007 	    (mpt->m_reply_frame + (rfm -
7008 	    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7009 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7010 
7011 
7012 	/*
7013 	 * Generate a system event to let anyone who cares know that a
7014 	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
7015 	 * event mask is set to.
7016 	 */
7017 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
7018 		sendAEN = TRUE;
7019 	}
7020 
7021 	/*
7022 	 * Record the event only if it is not masked.  Determine which dword
7023 	 * and bit of event mask to test.
7024 	 */
7025 	i = (uint8_t)(event / 32);
7026 	j = (uint8_t)(event % 32);
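	/*
	 * A set bit in m_event_mask (configured via the event-enable ioctl)
	 * means this event type should be recorded in the m_events ring.
	 */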
7027 	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
7028 		i = mpt->m_event_index;
7029 		mpt->m_events[i].Type = event;
7030 		mpt->m_events[i].Number = ++mpt->m_event_number;
7031 		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
7032 		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
7033 		    &eventreply->EventDataLength);
7034 
7035 		if (event_data_len > 0) {
7036 			/*
7037 			 * Limit data to size in m_event entry
7038 			 */
7039 			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
7040 				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
7041 			}
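			/*
			 * EventDataLength and MPTSAS_MAX_EVENT_DATA_LENGTH are
			 * both counted in 32-bit words, hence the
			 * dword-at-a-time copy below.
			 */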
7042 			for (j = 0; j < event_data_len; j++) {
7043 				mpt->m_events[i].Data[j] =
7044 				    ddi_get32(mpt->m_acc_reply_frame_hdl,
7045 				    &(eventreply->EventData[j]));
7046 			}
7047 
7048 			/*
7049 			 * check for index wrap-around
7050 			 */
7051 			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
7052 				i = 0;
7053 			}
7054 			mpt->m_event_index = (uint8_t)i;
7055 
7056 			/*
7057 			 * Set flag to send the event.
7058 			 */
7059 			sendAEN = TRUE;
7060 		}
7061 	}
7062 
7063 	/*
7064 	 * Generate a system event if flag is set to let anyone who cares know
7065 	 * that an event has occurred.
7066 	 */
7067 	if (sendAEN) {
7068 		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
7069 		    "SAS", NULL, NULL, DDI_NOSLEEP);
7070 	}
7071 }
7072 
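/*
 * Shorthand for the long MPI2 link-rate constant so that the case labels
 * below stay within line-length limits.
 */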
7073 #define	SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
7074 /*
7075  * Handle synchronous events from the IOC in interrupt context.
7076  * Return value:
7077  * DDI_SUCCESS: the event was handled by this function
7078  * DDI_FAILURE: the event was not handled
7079  */
7080 static int
7081 mptsas_handle_event_sync(void *args)
7082 {
7083 	m_replyh_arg_t			*replyh_arg;
7084 	pMpi2EventNotificationReply_t	eventreply;
7085 	uint32_t			event, rfm;
7086 	mptsas_t			*mpt;
7087 	uint_t				iocstatus;
7088 
7089 	replyh_arg = (m_replyh_arg_t *)args;
7090 	rfm = replyh_arg->rfm;
7091 	mpt = replyh_arg->mpt;
7092 
7093 	ASSERT(mutex_owned(&mpt->m_mutex));
7094 
7095 	eventreply = (pMpi2EventNotificationReply_t)
7096 	    (mpt->m_reply_frame + (rfm -
7097 	    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7098 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7099 
7100 	if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7101 	    &eventreply->IOCStatus)) != 0) {
7102 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7103 			mptsas_log(mpt, CE_WARN,
7104 			    "!mptsas_handle_event_sync: event 0x%x, "
7105 			    "IOCStatus=0x%x, "
7106 			    "IOCLogInfo=0x%x", event, iocstatus,
7107 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7108 			    &eventreply->IOCLogInfo));
7109 		} else {
7110 			mptsas_log(mpt, CE_WARN,
7111 			    "mptsas_handle_event_sync: event 0x%x, "
7112 			    "IOCStatus=0x%x, "
7113 			    "(IOCLogInfo=0x%x)", event, iocstatus,
7114 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7115 			    &eventreply->IOCLogInfo));
7116 		}
7117 	}
7118 
7119 	/*
7120 	 * figure out what kind of event we got and handle accordingly
7121 	 */
7122 	switch (event) {
7123 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7124 	{
7125 		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
7126 		uint8_t				num_entries, expstatus, phy;
7127 		uint8_t				phystatus, physport, state, i;
7128 		uint8_t				start_phy_num, link_rate;
7129 		uint16_t			dev_handle, reason_code;
7130 		uint16_t			enc_handle, expd_handle;
7131 		char				string[80], curr[80], prev[80];
7132 		mptsas_topo_change_list_t	*topo_head = NULL;
7133 		mptsas_topo_change_list_t	*topo_tail = NULL;
7134 		mptsas_topo_change_list_t	*topo_node = NULL;
7135 		mptsas_target_t			*ptgt;
7136 		mptsas_smp_t			*psmp;
7137 		uint8_t				flags = 0, exp_flag;
7138 		smhba_info_t			*pSmhba = NULL;
7139 
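		/*
		 * Each PHY entry of interest below is turned into a
		 * mptsas_topo_change_list_t node appended to the local
		 * topo_head/topo_tail list, which is handed to the DR taskq
		 * at the end of this case.
		 */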
7140 		NDBG20(("mptsas_handle_event_sync: SAS topology change"));
7141 
7142 		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
7143 		    eventreply->EventData;
7144 
7145 		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7146 		    &sas_topo_change_list->EnclosureHandle);
7147 		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7148 		    &sas_topo_change_list->ExpanderDevHandle);
7149 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7150 		    &sas_topo_change_list->NumEntries);
7151 		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7152 		    &sas_topo_change_list->StartPhyNum);
7153 		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7154 		    &sas_topo_change_list->ExpStatus);
7155 		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
7156 		    &sas_topo_change_list->PhysicalPort);
7157 
7158 		string[0] = 0;
7159 		if (expd_handle) {
7160 			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
7161 			switch (expstatus) {
7162 			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7163 				(void) sprintf(string, " added");
7164 				/*
7165 				 * New expander device added
7166 				 */
7167 				mpt->m_port_chng = 1;
7168 				topo_node = kmem_zalloc(
7169 				    sizeof (mptsas_topo_change_list_t),
7170 				    KM_SLEEP);
7171 				topo_node->mpt = mpt;
7172 				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
7173 				topo_node->un.physport = physport;
7174 				topo_node->devhdl = expd_handle;
7175 				topo_node->flags = flags;
7176 				topo_node->object = NULL;
7177 				if (topo_head == NULL) {
7178 					topo_head = topo_tail = topo_node;
7179 				} else {
7180 					topo_tail->next = topo_node;
7181 					topo_tail = topo_node;
7182 				}
7183 				break;
7184 			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7185 				(void) sprintf(string, " not responding, "
7186 				    "removed");
7187 				psmp = refhash_linear_search(mpt->m_smp_targets,
7188 				    mptsas_smp_eval_devhdl, &expd_handle);
7189 				if (psmp == NULL)
7190 					break;
7191 
7192 				topo_node = kmem_zalloc(
7193 				    sizeof (mptsas_topo_change_list_t),
7194 				    KM_SLEEP);
7195 				topo_node->mpt = mpt;
7196 				topo_node->un.phymask =
7197 				    psmp->m_addr.mta_phymask;
7198 				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
7199 				topo_node->devhdl = expd_handle;
7200 				topo_node->flags = flags;
7201 				topo_node->object = NULL;
7202 				if (topo_head == NULL) {
7203 					topo_head = topo_tail = topo_node;
7204 				} else {
7205 					topo_tail->next = topo_node;
7206 					topo_tail = topo_node;
7207 				}
7208 				break;
7209 			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7210 				break;
7211 			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7212 				(void) sprintf(string, " not responding, "
7213 				    "delaying removal");
7214 				break;
7215 			default:
7216 				break;
7217 			}
7218 		} else {
7219 			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
7220 		}
7221 
7222 		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
7223 		    enc_handle, expd_handle, string));
7224 		for (i = 0; i < num_entries; i++) {
7225 			phy = i + start_phy_num;
7226 			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7227 			    &sas_topo_change_list->PHY[i].PhyStatus);
7228 			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7229 			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
7230 			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
7231 			/*
7232 			 * Filter out processing of Phy Vacant Status unless
7233 			 * the reason code is "Not Responding".  Process all
7234 			 * other combinations of Phy Status and Reason Codes.
7235 			 */
7236 			if ((phystatus &
7237 			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
7238 			    (reason_code !=
7239 			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
7240 				continue;
7241 			}
7242 			curr[0] = 0;
7243 			prev[0] = 0;
7244 			string[0] = 0;
7245 			switch (reason_code) {
7246 			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7247 			{
7248 				NDBG20(("mptsas%d phy %d physical_port %d "
7249 				    "dev_handle %d added", mpt->m_instance, phy,
7250 				    physport, dev_handle));
7251 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7252 				    &sas_topo_change_list->PHY[i].LinkRate);
7253 				state = (link_rate &
7254 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7255 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7256 				switch (state) {
7257 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7258 					(void) sprintf(curr, "is disabled");
7259 					break;
7260 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7261 					(void) sprintf(curr, "is offline, "
7262 					    "failed speed negotiation");
7263 					break;
7264 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7265 					(void) sprintf(curr, "SATA OOB "
7266 					    "complete");
7267 					break;
7268 				case SMP_RESET_IN_PROGRESS:
7269 					(void) sprintf(curr, "SMP reset in "
7270 					    "progress");
7271 					break;
7272 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7273 					(void) sprintf(curr, "is online at "
7274 					    "1.5 Gbps");
7275 					break;
7276 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7277 					(void) sprintf(curr, "is online at 3.0 "
7278 					    "Gbps");
7279 					break;
7280 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7281 					(void) sprintf(curr, "is online at 6.0 "
7282 					    "Gbps");
7283 					break;
7284 				case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7285 					(void) sprintf(curr,
7286 					    "is online at 12.0 Gbps");
7287 					break;
7288 				default:
7289 					(void) sprintf(curr, "state is "
7290 					    "unknown");
7291 					break;
7292 				}
7293 				/*
7294 				 * A new target device was added to the system.
7295 				 * Set the association flag according to
7296 				 * whether an expander is used or not.
7297 				 */
7298 				exp_flag =
7299 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7300 				if (flags ==
7301 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7302 					flags = exp_flag;
7303 				}
7304 				topo_node = kmem_zalloc(
7305 				    sizeof (mptsas_topo_change_list_t),
7306 				    KM_SLEEP);
7307 				topo_node->mpt = mpt;
7308 				topo_node->event =
7309 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7310 				if (expd_handle == 0) {
7311 					/*
7312 					 * Per MPI 2, if expander dev handle
7313 					 * is 0, it's a directly attached
7314 					 * device, so the driver uses the PHY
7315 					 * number to decide the associated iport.
7316 					 */
7317 					physport = phy;
7318 					mpt->m_port_chng = 1;
7319 				}
7320 				topo_node->un.physport = physport;
7321 				topo_node->devhdl = dev_handle;
7322 				topo_node->flags = flags;
7323 				topo_node->object = NULL;
7324 				if (topo_head == NULL) {
7325 					topo_head = topo_tail = topo_node;
7326 				} else {
7327 					topo_tail->next = topo_node;
7328 					topo_tail = topo_node;
7329 				}
7330 				break;
7331 			}
7332 			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7333 			{
7334 				NDBG20(("mptsas%d phy %d physical_port %d "
7335 				    "dev_handle %d removed", mpt->m_instance,
7336 				    phy, physport, dev_handle));
7337 				/*
7338 				 * Set the association flag according to
7339 				 * whether an expander is used or not.
7340 				 */
7341 				exp_flag =
7342 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7343 				if (flags ==
7344 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7345 					flags = exp_flag;
7346 				}
7347 				/*
7348 				 * The target device has been removed from the
7349 				 * system before the device could actually be
7350 				 * taken offline.
7351 				 */
7352 				ptgt = refhash_linear_search(mpt->m_targets,
7353 				    mptsas_target_eval_devhdl, &dev_handle);
7354 				/*
7355 				 * If ptgt is NULL here, it means that the
7356 				 * DevHandle is not in the hash table.  This is
7357 				 * reasonable sometimes.  For example, if a
7358 				 * disk was pulled, then added, then pulled
7359 				 * again, the disk will not have been put into
7360 				 * the hash table because the add event will
7361 				 * have an invalid phymask.  BUT, this does not
7362 				 * mean that the DevHandle is invalid.  The
7363 				 * controller will still have a valid DevHandle
7364 				 * that must be removed.  To do this, use the
7365 				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7366 				 */
7367 				if (ptgt == NULL) {
7368 					topo_node = kmem_zalloc(
7369 					    sizeof (mptsas_topo_change_list_t),
7370 					    KM_SLEEP);
7371 					topo_node->mpt = mpt;
7372 					topo_node->un.phymask = 0;
7373 					topo_node->event =
7374 					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7375 					topo_node->devhdl = dev_handle;
7376 					topo_node->flags = flags;
7377 					topo_node->object = NULL;
7378 					if (topo_head == NULL) {
7379 						topo_head = topo_tail =
7380 						    topo_node;
7381 					} else {
7382 						topo_tail->next = topo_node;
7383 						topo_tail = topo_node;
7384 					}
7385 					break;
7386 				}
7387 
7388 				/*
7389 				 * Update the DR flag immediately to avoid I/O
7390 				 * failures before failover finishes.  Note the
7391 				 * locking: m_tx_waitq_mutex must be held while
7392 				 * setting m_dr_flag because subsequent commands
7393 				 * will not be added to the waitq; instead,
7394 				 * TRAN_BUSY is returned from the tran_start
7395 				 * context.
7396 				 */
7397 				mutex_enter(&mpt->m_tx_waitq_mutex);
7398 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7399 				mutex_exit(&mpt->m_tx_waitq_mutex);
7400 
7401 				topo_node = kmem_zalloc(
7402 				    sizeof (mptsas_topo_change_list_t),
7403 				    KM_SLEEP);
7404 				topo_node->mpt = mpt;
7405 				topo_node->un.phymask =
7406 				    ptgt->m_addr.mta_phymask;
7407 				topo_node->event =
7408 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7409 				topo_node->devhdl = dev_handle;
7410 				topo_node->flags = flags;
7411 				topo_node->object = NULL;
7412 				if (topo_head == NULL) {
7413 					topo_head = topo_tail = topo_node;
7414 				} else {
7415 					topo_tail->next = topo_node;
7416 					topo_tail = topo_node;
7417 				}
7418 				break;
7419 			}
7420 			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7421 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7422 				    &sas_topo_change_list->PHY[i].LinkRate);
7423 				state = (link_rate &
7424 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7425 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7426 				pSmhba = &mpt->m_phy_info[i].smhba_info;
7427 				pSmhba->negotiated_link_rate = state;
7428 				switch (state) {
7429 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7430 					(void) sprintf(curr, "is disabled");
7431 					mptsas_smhba_log_sysevent(mpt,
7432 					    ESC_SAS_PHY_EVENT,
7433 					    SAS_PHY_REMOVE,
7434 					    &mpt->m_phy_info[i].smhba_info);
7435 					mpt->m_phy_info[i].smhba_info.
7436 					    negotiated_link_rate
7437 					    = 0x1;
7438 					break;
7439 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7440 					(void) sprintf(curr, "is offline, "
7441 					    "failed speed negotiation");
7442 					mptsas_smhba_log_sysevent(mpt,
7443 					    ESC_SAS_PHY_EVENT,
7444 					    SAS_PHY_OFFLINE,
7445 					    &mpt->m_phy_info[i].smhba_info);
7446 					break;
7447 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7448 					(void) sprintf(curr, "SATA OOB "
7449 					    "complete");
7450 					break;
7451 				case SMP_RESET_IN_PROGRESS:
7452 					(void) sprintf(curr, "SMP reset in "
7453 					    "progress");
7454 					break;
7455 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7456 					(void) sprintf(curr, "is online at "
7457 					    "1.5 Gbps");
7458 					if ((expd_handle == 0) &&
7459 					    (enc_handle == 1)) {
7460 						mpt->m_port_chng = 1;
7461 					}
7462 					mptsas_smhba_log_sysevent(mpt,
7463 					    ESC_SAS_PHY_EVENT,
7464 					    SAS_PHY_ONLINE,
7465 					    &mpt->m_phy_info[i].smhba_info);
7466 					break;
7467 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7468 					(void) sprintf(curr, "is online at 3.0 "
7469 					    "Gbps");
7470 					if ((expd_handle == 0) &&
7471 					    (enc_handle == 1)) {
7472 						mpt->m_port_chng = 1;
7473 					}
7474 					mptsas_smhba_log_sysevent(mpt,
7475 					    ESC_SAS_PHY_EVENT,
7476 					    SAS_PHY_ONLINE,
7477 					    &mpt->m_phy_info[i].smhba_info);
7478 					break;
7479 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7480 					(void) sprintf(curr, "is online at "
7481 					    "6.0 Gbps");
7482 					if ((expd_handle == 0) &&
7483 					    (enc_handle == 1)) {
7484 						mpt->m_port_chng = 1;
7485 					}
7486 					mptsas_smhba_log_sysevent(mpt,
7487 					    ESC_SAS_PHY_EVENT,
7488 					    SAS_PHY_ONLINE,
7489 					    &mpt->m_phy_info[i].smhba_info);
7490 					break;
7491 				case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7492 					(void) sprintf(curr, "is online at "
7493 					    "12.0 Gbps");
7494 					if ((expd_handle == 0) &&
7495 					    (enc_handle == 1)) {
7496 						mpt->m_port_chng = 1;
7497 					}
7498 					mptsas_smhba_log_sysevent(mpt,
7499 					    ESC_SAS_PHY_EVENT,
7500 					    SAS_PHY_ONLINE,
7501 					    &mpt->m_phy_info[i].smhba_info);
7502 					break;
7503 				default:
7504 					(void) sprintf(curr, "state is "
7505 					    "unknown");
7506 					break;
7507 				}
7508 
7509 				state = (link_rate &
7510 				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7511 				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7512 				switch (state) {
7513 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7514 					(void) sprintf(prev, ", was disabled");
7515 					break;
7516 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7517 					(void) sprintf(prev, ", was offline, "
7518 					    "failed speed negotiation");
7519 					break;
7520 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7521 					(void) sprintf(prev, ", was SATA OOB "
7522 					    "complete");
7523 					break;
7524 				case SMP_RESET_IN_PROGRESS:
7525 					(void) sprintf(prev, ", was SMP reset "
7526 					    "in progress");
7527 					break;
7528 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7529 					(void) sprintf(prev, ", was online at "
7530 					    "1.5 Gbps");
7531 					break;
7532 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7533 					(void) sprintf(prev, ", was online at "
7534 					    "3.0 Gbps");
7535 					break;
7536 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7537 					(void) sprintf(prev, ", was online at "
7538 					    "6.0 Gbps");
7539 					break;
7540 				case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7541 					(void) sprintf(prev, ", was online at "
7542 					    "12.0 Gbps");
7543 					break;
7544 				default:
7545 					break;
7546 				}
7547 				(void) sprintf(&string[strlen(string)], "link "
7548 				    "changed, ");
7549 				break;
7550 			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7551 				continue;
7552 			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7553 				(void) sprintf(&string[strlen(string)],
7554 				    "target not responding, delaying "
7555 				    "removal");
7556 				break;
7557 			}
7558 			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7559 			    mpt->m_instance, phy, dev_handle, string, curr,
7560 			    prev));
7561 		}
7562 		if (topo_head != NULL) {
7563 			/*
7564 			 * Launch DR taskq to handle topology change
7565 			 */
7566 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7567 			    mptsas_handle_dr, (void *)topo_head,
7568 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
7569 				while (topo_head != NULL) {
7570 					topo_node = topo_head;
7571 					topo_head = topo_head->next;
7572 					kmem_free(topo_node,
7573 					    sizeof (mptsas_topo_change_list_t));
7574 				}
7575 				mptsas_log(mpt, CE_NOTE, "mptsas failed to "
7576 				    "start taskq to handle SAS DR event\n");
7577 			}
7578 		}
7579 		break;
7580 	}
7581 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7582 	{
7583 		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
7584 		mptsas_topo_change_list_t		*topo_head = NULL;
7585 		mptsas_topo_change_list_t		*topo_tail = NULL;
7586 		mptsas_topo_change_list_t		*topo_node = NULL;
7587 		mptsas_target_t				*ptgt;
7588 		uint8_t					num_entries, i, reason;
7589 		uint16_t				volhandle, diskhandle;
7590 
7591 		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7592 		    eventreply->EventData;
7593 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7594 		    &irChangeList->NumElements);
7595 
7596 		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7597 		    mpt->m_instance));
7598 
7599 		for (i = 0; i < num_entries; i++) {
7600 			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7601 			    &irChangeList->ConfigElement[i].ReasonCode);
7602 			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7603 			    &irChangeList->ConfigElement[i].VolDevHandle);
7604 			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7605 			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7606 
7607 			switch (reason) {
7608 			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7609 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7610 			{
7611 				NDBG20(("mptsas %d volume added\n",
7612 				    mpt->m_instance));
7613 
7614 				topo_node = kmem_zalloc(
7615 				    sizeof (mptsas_topo_change_list_t),
7616 				    KM_SLEEP);
7617 
7618 				topo_node->mpt = mpt;
7619 				topo_node->event =
7620 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7621 				topo_node->un.physport = 0xff;
7622 				topo_node->devhdl = volhandle;
7623 				topo_node->flags =
7624 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7625 				topo_node->object = NULL;
7626 				if (topo_head == NULL) {
7627 					topo_head = topo_tail = topo_node;
7628 				} else {
7629 					topo_tail->next = topo_node;
7630 					topo_tail = topo_node;
7631 				}
7632 				break;
7633 			}
7634 			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7635 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7636 			{
7637 				NDBG20(("mptsas %d volume deleted\n",
7638 				    mpt->m_instance));
7639 				ptgt = refhash_linear_search(mpt->m_targets,
7640 				    mptsas_target_eval_devhdl, &volhandle);
7641 				if (ptgt == NULL)
7642 					break;
7643 
7644 				/*
7645 				 * Clear any flags related to volume
7646 				 */
7647 				(void) mptsas_delete_volume(mpt, volhandle);
7648 
7649 				/*
7650 				 * Update the DR flag immediately to avoid I/O failures.
7651 				 */
7652 				mutex_enter(&mpt->m_tx_waitq_mutex);
7653 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7654 				mutex_exit(&mpt->m_tx_waitq_mutex);
7655 
7656 				topo_node = kmem_zalloc(
7657 				    sizeof (mptsas_topo_change_list_t),
7658 				    KM_SLEEP);
7659 				topo_node->mpt = mpt;
7660 				topo_node->un.phymask =
7661 				    ptgt->m_addr.mta_phymask;
7662 				topo_node->event =
7663 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7664 				topo_node->devhdl = volhandle;
7665 				topo_node->flags =
7666 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7667 				topo_node->object = (void *)ptgt;
7668 				if (topo_head == NULL) {
7669 					topo_head = topo_tail = topo_node;
7670 				} else {
7671 					topo_tail->next = topo_node;
7672 					topo_tail = topo_node;
7673 				}
7674 				break;
7675 			}
7676 			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7677 			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7678 			{
7679 				ptgt = refhash_linear_search(mpt->m_targets,
7680 				    mptsas_target_eval_devhdl, &diskhandle);
7681 				if (ptgt == NULL)
7682 					break;
7683 
7684 				/*
7685 				 * Update the DR flag immediately to avoid I/O failures.
7686 				 */
7687 				mutex_enter(&mpt->m_tx_waitq_mutex);
7688 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7689 				mutex_exit(&mpt->m_tx_waitq_mutex);
7690 
7691 				topo_node = kmem_zalloc(
7692 				    sizeof (mptsas_topo_change_list_t),
7693 				    KM_SLEEP);
7694 				topo_node->mpt = mpt;
7695 				topo_node->un.phymask =
7696 				    ptgt->m_addr.mta_phymask;
7697 				topo_node->event =
7698 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7699 				topo_node->devhdl = diskhandle;
7700 				topo_node->flags =
7701 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7702 				topo_node->object = (void *)ptgt;
7703 				if (topo_head == NULL) {
7704 					topo_head = topo_tail = topo_node;
7705 				} else {
7706 					topo_tail->next = topo_node;
7707 					topo_tail = topo_node;
7708 				}
7709 				break;
7710 			}
7711 			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7712 			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7713 			{
7714 				/*
7715 				 * The physical drive has been released by an
7716 				 * IR volume.  The physport/phynum cannot be
7717 				 * obtained from the event data, so they are
7718 				 * only available after a SAS Device Page0
7719 				 * request for the devhdl.
7720 				 */
7721 				topo_node = kmem_zalloc(
7722 				    sizeof (mptsas_topo_change_list_t),
7723 				    KM_SLEEP);
7724 				topo_node->mpt = mpt;
7725 				topo_node->un.phymask = 0;
7726 				topo_node->event =
7727 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7728 				topo_node->devhdl = diskhandle;
7729 				topo_node->flags =
7730 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7731 				topo_node->object = NULL;
7732 				mpt->m_port_chng = 1;
7733 				if (topo_head == NULL) {
7734 					topo_head = topo_tail = topo_node;
7735 				} else {
7736 					topo_tail->next = topo_node;
7737 					topo_tail = topo_node;
7738 				}
7739 				break;
7740 			}
7741 			default:
7742 				break;
7743 			}
7744 		}
7745 
7746 		if (topo_head != NULL) {
7747 			/*
7748 			 * Launch DR taskq to handle topology change
7749 			 */
7750 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7751 			    mptsas_handle_dr, (void *)topo_head,
7752 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
7753 				while (topo_head != NULL) {
7754 					topo_node = topo_head;
7755 					topo_head = topo_head->next;
7756 					kmem_free(topo_node,
7757 					    sizeof (mptsas_topo_change_list_t));
7758 				}
7759 				mptsas_log(mpt, CE_NOTE, "mptsas failed to "
7760 				    "start taskq to handle SAS DR event\n");
7761 			}
7762 		}
7763 		break;
7764 	}
7765 	default:
7766 		return (DDI_FAILURE);
7767 	}
7768 
7769 	return (DDI_SUCCESS);
7770 }
7771 
7772 /*
7773  * handle events from ioc
7774  */
7775 static void
7776 mptsas_handle_event(void *args)
7777 {
7778 	m_replyh_arg_t			*replyh_arg;
7779 	pMpi2EventNotificationReply_t	eventreply;
7780 	uint32_t			event, iocloginfo, rfm;
7781 	uint32_t			status;
7782 	uint8_t				port;
7783 	mptsas_t			*mpt;
7784 	uint_t				iocstatus;
7785 
7786 	replyh_arg = (m_replyh_arg_t *)args;
7787 	rfm = replyh_arg->rfm;
7788 	mpt = replyh_arg->mpt;
7789 
7790 	mutex_enter(&mpt->m_mutex);
7791 	/*
7792 	 * If HBA is being reset, drop incoming event.
7793 	 */
7794 	if (mpt->m_in_reset) {
7795 		NDBG20(("dropping event received prior to reset"));
7796 		mutex_exit(&mpt->m_mutex);
7797 		return;
7798 	}
7799 
7800 	eventreply = (pMpi2EventNotificationReply_t)
7801 	    (mpt->m_reply_frame + (rfm -
7802 	    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7803 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7804 
7805 	if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7806 	    &eventreply->IOCStatus)) != 0) {
7807 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7808 			mptsas_log(mpt, CE_WARN,
7809 			    "!mptsas_handle_event: IOCStatus=0x%x, "
7810 			    "IOCLogInfo=0x%x", iocstatus,
7811 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7812 			    &eventreply->IOCLogInfo));
7813 		} else {
7814 			mptsas_log(mpt, CE_WARN,
7815 			    "mptsas_handle_event: IOCStatus=0x%x, "
7816 			    "IOCLogInfo=0x%x", iocstatus,
7817 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7818 			    &eventreply->IOCLogInfo));
7819 		}
7820 	}
7821 
7822 	/*
7823 	 * figure out what kind of event we got and handle accordingly
7824 	 */
7825 	switch (event) {
7826 	case MPI2_EVENT_LOG_ENTRY_ADDED:
7827 		break;
7828 	case MPI2_EVENT_LOG_DATA:
7829 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7830 		    &eventreply->IOCLogInfo);
7831 		NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7832 		    iocloginfo));
7833 		break;
7834 	case MPI2_EVENT_STATE_CHANGE:
7835 		NDBG20(("mptsas%d state change.", mpt->m_instance));
7836 		break;
7837 	case MPI2_EVENT_HARD_RESET_RECEIVED:
7838 		NDBG20(("mptsas%d hard reset received.", mpt->m_instance));
7839 		break;
7840 	case MPI2_EVENT_SAS_DISCOVERY:
7841 	{
7842 		MPI2_EVENT_DATA_SAS_DISCOVERY	*sasdiscovery;
7843 		char				string[80];
7844 		uint8_t				rc;
7845 
7846 		sasdiscovery =
7847 		    (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7848 
7849 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7850 		    &sasdiscovery->ReasonCode);
7851 		port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7852 		    &sasdiscovery->PhysicalPort);
7853 		status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7854 		    &sasdiscovery->DiscoveryStatus);
7855 
7856 		string[0] = 0;
7857 		switch (rc) {
7858 		case MPI2_EVENT_SAS_DISC_RC_STARTED:
7859 			(void) sprintf(string, "STARTING");
7860 			break;
7861 		case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7862 			(void) sprintf(string, "COMPLETED");
7863 			break;
7864 		default:
7865 			(void) sprintf(string, "UNKNOWN");
7866 			break;
7867 		}
7868 
7869 		NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7870 		    port, status));
7871 
7872 		break;
7873 	}
7874 	case MPI2_EVENT_EVENT_CHANGE:
7875 		NDBG20(("mptsas%d event change.", mpt->m_instance));
7876 		break;
7877 	case MPI2_EVENT_TASK_SET_FULL:
7878 	{
7879 		pMpi2EventDataTaskSetFull_t	taskfull;
7880 
7881 		taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7882 
7883 		NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7884 		    mpt->m_instance,  ddi_get16(mpt->m_acc_reply_frame_hdl,
7885 		    &taskfull->CurrentDepth)));
7886 		break;
7887 	}
7888 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7889 	{
7890 		/*
7891 		 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7892 		 * in mptsas_handle_event_sync() in interrupt context.
7893 		 */
7894 		break;
7895 	}
7896 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7897 	{
7898 		pMpi2EventDataSasEnclDevStatusChange_t	encstatus;
7899 		uint8_t					rc;
7900 		uint16_t				enchdl;
7901 		char					string[80];
7902 		mptsas_enclosure_t			*mep;
7903 
7904 		encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7905 		    eventreply->EventData;
7906 
7907 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7908 		    &encstatus->ReasonCode);
7909 		enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7910 		    &encstatus->EnclosureHandle);
7911 		string[0] = 0;
7912 		switch (rc) {
7913 		case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7914 			(void) sprintf(string, ", added");
7915 			break;
7916 		case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7917 			mep = mptsas_enc_lookup(mpt, enchdl);
7918 			if (mep != NULL) {
7919 				list_remove(&mpt->m_enclosures, mep);
7920 				mptsas_enc_free(mep);
7921 				mep = NULL;
7922 			}
7923 			(void) sprintf(string, ", not responding");
7924 			break;
7925 		default:
7926 			break;
7927 		}
7928 		NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7929 		    "%x%s\n", mpt->m_instance,
7930 		    ddi_get16(mpt->m_acc_reply_frame_hdl,
7931 		    &encstatus->EnclosureHandle), string));
7932 
7933 		/*
7934 		 * No matter what has happened, update all of our device state
7935 		 * for enclosures, by retriggering an evaluation.
7936 		 */
7937 		mpt->m_done_traverse_enc = 0;
7938 		mptsas_update_hashtab(mpt);
7939 		break;
7940 	}
7941 
7942 	/*
7943 	 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7944 	 * mptsas_handle_event_sync(); here we just send the ack message.
7945 	 */
7946 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7947 	{
7948 		pMpi2EventDataSasDeviceStatusChange_t	statuschange;
7949 		uint8_t					rc;
7950 		uint16_t				devhdl;
7951 		uint64_t				wwn = 0;
7952 		uint32_t				wwn_lo, wwn_hi;
7953 
7954 		statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7955 		    eventreply->EventData;
7956 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7957 		    &statuschange->ReasonCode);
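		/*
		 * The 64-bit SAS address is read as two 32-bit words through
		 * the access handle and reassembled below.
		 */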
7958 		wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7959 		    (uint32_t *)(void *)&statuschange->SASAddress);
7960 		wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7961 		    (uint32_t *)(void *)&statuschange->SASAddress + 1);
7962 		wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7963 		devhdl =  ddi_get16(mpt->m_acc_reply_frame_hdl,
7964 		    &statuschange->DevHandle);
7965 
7966 		NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7967 		    wwn));
7968 
7969 		switch (rc) {
7970 		case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7971 			NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7972 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
7973 			    &statuschange->ASC),
7974 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
7975 			    &statuschange->ASCQ)));
7976 			break;
7977 
7978 		case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7979 			NDBG20(("Device not supported"));
7980 			break;
7981 
7982 		case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7983 			NDBG20(("IOC internally generated the Target Reset "
7984 			    "for devhdl:%x", devhdl));
7985 			break;
7986 
7987 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7988 			NDBG20(("IOC's internally generated Target Reset "
7989 			    "completed for devhdl:%x", devhdl));
7990 			break;
7991 
7992 		case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7993 			NDBG20(("IOC internally generated Abort Task"));
7994 			break;
7995 
7996 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7997 			NDBG20(("IOC's internally generated Abort Task "
7998 			    "completed"));
7999 			break;
8000 
8001 		case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8002 			NDBG20(("IOC internally generated Abort Task Set"));
8003 			break;
8004 
8005 		case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8006 			NDBG20(("IOC internally generated Clear Task Set"));
8007 			break;
8008 
8009 		case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
8010 			NDBG20(("IOC internally generated Query Task"));
8011 			break;
8012 
8013 		case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
8014 			NDBG20(("Device sent an Asynchronous Notification"));
8015 			break;
8016 
8017 		default:
8018 			break;
8019 		}
8020 		break;
8021 	}
8022 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
8023 	{
8024 		/*
8025 		 * The IR CONFIGURATION CHANGE LIST event has already been
8026 		 * handled in mptsas_handle_event_sync() in interrupt context.
8027 		 */
8028 		break;
8029 	}
8030 	case MPI2_EVENT_IR_OPERATION_STATUS:
8031 	{
8032 		Mpi2EventDataIrOperationStatus_t	*irOpStatus;
8033 		char					reason_str[80];
8034 		uint8_t					rc, percent;
8035 		uint16_t				handle;
8036 
8037 		irOpStatus = (pMpi2EventDataIrOperationStatus_t)
8038 		    eventreply->EventData;
8039 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8040 		    &irOpStatus->RAIDOperation);
8041 		percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
8042 		    &irOpStatus->PercentComplete);
8043 		handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8044 		    &irOpStatus->VolDevHandle);
8045 
8046 		switch (rc) {
8047 			case MPI2_EVENT_IR_RAIDOP_RESYNC:
8048 				(void) sprintf(reason_str, "resync");
8049 				break;
8050 			case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8051 				(void) sprintf(reason_str, "online capacity "
8052 				    "expansion");
8053 				break;
8054 			case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8055 				(void) sprintf(reason_str, "consistency check");
8056 				break;
8057 			default:
8058 				(void) sprintf(reason_str, "unknown reason %x",
8059 				    rc);
8060 		}
8061 
8062 		NDBG20(("mptsas%d raid operational status: (%s)"
8063 		    "\thandle(0x%04x), percent complete(%d)\n",
8064 		    mpt->m_instance, reason_str, handle, percent));
8065 		break;
8066 	}
8067 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
8068 	{
8069 		pMpi2EventDataSasBroadcastPrimitive_t	sas_broadcast;
8070 		uint8_t					phy_num;
8071 		uint8_t					primitive;
8072 
8073 		sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
8074 		    eventreply->EventData;
8075 
8076 		phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
8077 		    &sas_broadcast->PhyNum);
8078 		primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
8079 		    &sas_broadcast->Primitive);
8080 
8081 		switch (primitive) {
8082 		case MPI2_EVENT_PRIMITIVE_CHANGE:
8083 			mptsas_smhba_log_sysevent(mpt,
8084 			    ESC_SAS_HBA_PORT_BROADCAST,
8085 			    SAS_PORT_BROADCAST_CHANGE,
8086 			    &mpt->m_phy_info[phy_num].smhba_info);
8087 			break;
8088 		case MPI2_EVENT_PRIMITIVE_SES:
8089 			mptsas_smhba_log_sysevent(mpt,
8090 			    ESC_SAS_HBA_PORT_BROADCAST,
8091 			    SAS_PORT_BROADCAST_SES,
8092 			    &mpt->m_phy_info[phy_num].smhba_info);
8093 			break;
8094 		case MPI2_EVENT_PRIMITIVE_EXPANDER:
8095 			mptsas_smhba_log_sysevent(mpt,
8096 			    ESC_SAS_HBA_PORT_BROADCAST,
8097 			    SAS_PORT_BROADCAST_D01_4,
8098 			    &mpt->m_phy_info[phy_num].smhba_info);
8099 			break;
8100 		case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
8101 			mptsas_smhba_log_sysevent(mpt,
8102 			    ESC_SAS_HBA_PORT_BROADCAST,
8103 			    SAS_PORT_BROADCAST_D04_7,
8104 			    &mpt->m_phy_info[phy_num].smhba_info);
8105 			break;
8106 		case MPI2_EVENT_PRIMITIVE_RESERVED3:
8107 			mptsas_smhba_log_sysevent(mpt,
8108 			    ESC_SAS_HBA_PORT_BROADCAST,
8109 			    SAS_PORT_BROADCAST_D16_7,
8110 			    &mpt->m_phy_info[phy_num].smhba_info);
8111 			break;
8112 		case MPI2_EVENT_PRIMITIVE_RESERVED4:
8113 			mptsas_smhba_log_sysevent(mpt,
8114 			    ESC_SAS_HBA_PORT_BROADCAST,
8115 			    SAS_PORT_BROADCAST_D29_7,
8116 			    &mpt->m_phy_info[phy_num].smhba_info);
8117 			break;
8118 		case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
8119 			mptsas_smhba_log_sysevent(mpt,
8120 			    ESC_SAS_HBA_PORT_BROADCAST,
8121 			    SAS_PORT_BROADCAST_D24_0,
8122 			    &mpt->m_phy_info[phy_num].smhba_info);
8123 			break;
8124 		case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
8125 			mptsas_smhba_log_sysevent(mpt,
8126 			    ESC_SAS_HBA_PORT_BROADCAST,
8127 			    SAS_PORT_BROADCAST_D27_4,
8128 			    &mpt->m_phy_info[phy_num].smhba_info);
8129 			break;
8130 		default:
8131 			NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
8132 			    " %x received",
8133 			    mpt->m_instance, primitive));
8134 			break;
8135 		}
8136 		NDBG16(("mptsas%d sas broadcast primitive: "
8137 		    "\tprimitive(0x%04x), phy(%d) complete\n",
8138 		    mpt->m_instance, primitive, phy_num));
8139 		break;
8140 	}
8141 	case MPI2_EVENT_IR_VOLUME:
8142 	{
8143 		Mpi2EventDataIrVolume_t		*irVolume;
8144 		uint16_t			devhandle;
8145 		uint32_t			state;
8146 		int				config, vol;
8147 		uint8_t				found = FALSE;
8148 
8149 		irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
8150 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8151 		    &irVolume->NewValue);
8152 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8153 		    &irVolume->VolDevHandle);
8154 
8155 		NDBG20(("EVENT_IR_VOLUME event is received"));
8156 
8157 		/*
8158 		 * Get latest RAID info and then find the DevHandle for this
8159 		 * event in the configuration.  If the DevHandle is not found
8160 		 * just exit the event.
8161 		 */
8162 		(void) mptsas_get_raid_info(mpt);
8163 		for (config = 0; (config < mpt->m_num_raid_configs) &&
8164 		    (!found); config++) {
8165 			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
8166 				if (mpt->m_raidconfig[config].m_raidvol[vol].
8167 				    m_raidhandle == devhandle) {
8168 					found = TRUE;
8169 					break;
8170 				}
8171 			}
8172 		}
8173 		if (!found) {
8174 			break;
8175 		}
8176 
8177 		switch (irVolume->ReasonCode) {
8178 		case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8179 		{
8180 			uint32_t i;
8181 			mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8182 			    state;
8183 
8184 			i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8185 			mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8186 			    ", auto-config of hot-swap drives is %s"
8187 			    ", write caching is %s"
8188 			    ", hot-spare pool mask is %02x\n",
8189 			    vol, state &
8190 			    MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8191 			    ? "disabled" : "enabled",
8192 			    i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8193 			    ? "controlled by member disks" :
8194 			    i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8195 			    ? "disabled" :
8196 			    i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8197 			    ? "enabled" :
8198 			    "incorrectly set",
8199 			    (state >> 16) & 0xff);
8200 				break;
8201 		}
8202 		case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8203 		{
8204 			mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8205 			    (uint8_t)state;
8206 
8207 			mptsas_log(mpt, CE_NOTE,
8208 			    "Volume %d is now %s\n", vol,
8209 			    state == MPI2_RAID_VOL_STATE_OPTIMAL
8210 			    ? "optimal" :
8211 			    state == MPI2_RAID_VOL_STATE_DEGRADED
8212 			    ? "degraded" :
8213 			    state == MPI2_RAID_VOL_STATE_ONLINE
8214 			    ? "online" :
8215 			    state == MPI2_RAID_VOL_STATE_INITIALIZING
8216 			    ? "initializing" :
8217 			    state == MPI2_RAID_VOL_STATE_FAILED
8218 			    ? "failed" :
8219 			    state == MPI2_RAID_VOL_STATE_MISSING
8220 			    ? "missing" :
8221 			    "state unknown");
8222 			break;
8223 		}
8224 		case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8225 		{
8226 			mpt->m_raidconfig[config].m_raidvol[vol].
8227 			    m_statusflags = state;
8228 
8229 			mptsas_log(mpt, CE_NOTE,
8230 			    " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
8231 			    vol,
8232 			    state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8233 			    ? ", enabled" : ", disabled",
8234 			    state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8235 			    ? ", quiesced" : "",
8236 			    state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8237 			    ? ", inactive" : ", active",
8238 			    state &
8239 			    MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8240 			    ? ", bad block table is full" : "",
8241 			    state &
8242 			    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8243 			    ? ", resync in progress" : "",
8244 			    state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8245 			    ? ", background initialization in progress" : "",
8246 			    state &
8247 			    MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8248 			    ? ", capacity expansion in progress" : "",
8249 			    state &
8250 			    MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8251 			    ? ", consistency check in progress" : "",
8252 			    state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
8253 			    ? ", data scrub in progress" : "");
8254 			break;
8255 		}
8256 		default:
8257 			break;
8258 		}
8259 		break;
8260 	}
8261 	case MPI2_EVENT_IR_PHYSICAL_DISK:
8262 	{
8263 		Mpi2EventDataIrPhysicalDisk_t	*irPhysDisk;
8264 		uint16_t			devhandle, enchandle, slot;
8265 		uint32_t			status, state;
8266 		uint8_t				physdisknum, reason;
8267 
8268 		irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8269 		    eventreply->EventData;
8270 		physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8271 		    &irPhysDisk->PhysDiskNum);
8272 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8273 		    &irPhysDisk->PhysDiskDevHandle);
8274 		enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8275 		    &irPhysDisk->EnclosureHandle);
8276 		slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8277 		    &irPhysDisk->Slot);
8278 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8279 		    &irPhysDisk->NewValue);
8280 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8281 		    &irPhysDisk->ReasonCode);
8282 
8283 		NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8284 
8285 		switch (reason) {
8286 		case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8287 			mptsas_log(mpt, CE_NOTE,
8288 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8289 			    "for enclosure with handle 0x%x is now in hot "
8290 			    "spare pool %d",
8291 			    physdisknum, devhandle, slot, enchandle,
8292 			    (state >> 16) & 0xff);
8293 			break;
8294 
8295 		case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8296 			status = state;
8297 			mptsas_log(mpt, CE_NOTE,
8298 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8299 			    "for enclosure with handle 0x%x is now "
8300 			    "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8301 			    enchandle,
8302 			    status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8303 			    ? ", inactive" : ", active",
8304 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8305 			    ? ", out of sync" : "",
8306 			    status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8307 			    ? ", quiesced" : "",
8308 			    status &
8309 			    MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8310 			    ? ", write cache enabled" : "",
8311 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8312 			    ? ", capacity expansion target" : "");
8313 			break;
8314 
8315 		case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8316 			mptsas_log(mpt, CE_NOTE,
8317 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8318 			    "for enclosure with handle 0x%x is now %s\n",
8319 			    physdisknum, devhandle, slot, enchandle,
8320 			    state == MPI2_RAID_PD_STATE_OPTIMAL
8321 			    ? "optimal" :
8322 			    state == MPI2_RAID_PD_STATE_REBUILDING
8323 			    ? "rebuilding" :
8324 			    state == MPI2_RAID_PD_STATE_DEGRADED
8325 			    ? "degraded" :
8326 			    state == MPI2_RAID_PD_STATE_HOT_SPARE
8327 			    ? "a hot spare" :
8328 			    state == MPI2_RAID_PD_STATE_ONLINE
8329 			    ? "online" :
8330 			    state == MPI2_RAID_PD_STATE_OFFLINE
8331 			    ? "offline" :
8332 			    state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8333 			    ? "not compatible" :
8334 			    state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8335 			    ? "not configured" :
8336 			    "state unknown");
8337 			break;
8338 		}
8339 		break;
8340 	}
8341 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8342 	{
8343 		pMpi26EventDataActiveCableExcept_t	actcable;
8344 		uint32_t power;
8345 		uint8_t reason, id;
8346 
8347 		actcable = (pMpi26EventDataActiveCableExcept_t)
8348 		    eventreply->EventData;
8349 		power = ddi_get32(mpt->m_acc_reply_frame_hdl,
8350 		    &actcable->ActiveCablePowerRequirement);
8351 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8352 		    &actcable->ReasonCode);
8353 		id = ddi_get8(mpt->m_acc_reply_frame_hdl,
8354 		    &actcable->ReceptacleID);
8355 
8356 		/*
8357 		 * It'd be nice if this weren't just logging to the system but
8358 		 * were telling FMA about the active cable problem and FMA was
8359 		 * aware of the cable topology and state.
8360 		 */
8361 		switch (reason) {
8362 		case MPI26_EVENT_ACTIVE_CABLE_PRESENT:
8363 			/* Don't log anything if it's fine */
8364 			break;
8365 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8366 			mptsas_log(mpt, CE_WARN, "An active cable (id %u) does "
8367 			    "not have sufficient power to be enabled. "
8368 			    "Devices connected to this cable will not be "
8369 			    "visible to the system.", id);
8370 			if (power == UINT32_MAX) {
8371 				mptsas_log(mpt, CE_CONT, "The cable's power "
8372 				    "requirements are unknown.\n");
8373 			} else {
8374 				mptsas_log(mpt, CE_CONT, "The cable requires "
8375 				    "%u mW of power to function.\n", power);
8376 			}
8377 			break;
8378 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8379 			mptsas_log(mpt, CE_WARN, "An active cable (id %u) is "
8380 			    "degraded and not running at its full speed. "
8381 			    "Some devices might not appear.", id);
8382 			break;
8383 		default:
8384 			break;
8385 		}
8386 		break;
8387 	}
8388 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8389 	case MPI2_EVENT_PCIE_ENUMERATION:
8390 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
8391 	case MPI2_EVENT_PCIE_LINK_COUNTER:
8392 		mptsas_log(mpt, CE_NOTE, "Unhandled mpt_sas PCIe device "
8393 		    "event received (0x%x)", event);
8394 		break;
8395 	default:
8396 		NDBG20(("mptsas%d: unknown event %x received",
8397 		    mpt->m_instance, event));
8398 		break;
8399 	}
8400 
8401 	/*
8402 	 * Return the reply frame to the free queue.
8403 	 */
8404 	ddi_put32(mpt->m_acc_free_queue_hdl,
8405 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8406 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8407 	    DDI_DMA_SYNC_FORDEV);
8408 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8409 		mpt->m_free_index = 0;
8410 	}
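	/*
	 * Writing the updated free index to ReplyFreeHostIndex lets the IOC
	 * know that this reply frame may be reused.
	 */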
8411 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8412 	    mpt->m_free_index);
8413 	mutex_exit(&mpt->m_mutex);
8414 }
8415 
8416 /*
8417  * Invoked from timeout() to restart cmds on targets throttled due to QUEUE FULL
8418  */
8419 static void
8420 mptsas_restart_cmd(void *arg)
8421 {
8422 	mptsas_t	*mpt = arg;
8423 	mptsas_target_t	*ptgt = NULL;
8424 
8425 	mutex_enter(&mpt->m_mutex);
8426 
8427 	mpt->m_restart_cmd_timeid = 0;
8428 
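	/*
	 * Restore full throttle on any target that was quiesced due to a
	 * QUEUE FULL condition, provided its reset delay has expired.
	 */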
8429 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8430 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
8431 		if (ptgt->m_reset_delay == 0) {
8432 			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8433 				mptsas_set_throttle(mpt, ptgt,
8434 				    MAX_THROTTLE);
8435 			}
8436 		}
8437 	}
8438 	mptsas_restart_hba(mpt);
8439 	mutex_exit(&mpt->m_mutex);
8440 }
8441 
8442 void
8443 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8444 {
8445 	int		slot;
8446 	mptsas_slots_t	*slots = mpt->m_active;
8447 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
8448 
8449 	ASSERT(cmd != NULL);
8450 	ASSERT(cmd->cmd_queued == FALSE);
8451 
8452 	/*
8453 	 * Task Management cmds are removed in their own routines.  Also,
8454 	 * we don't want to modify timeout based on TM cmds.
8455 	 */
8456 	if (cmd->cmd_flags & CFLAG_TM_CMD) {
8457 		return;
8458 	}
8459 
8460 	slot = cmd->cmd_slot;
8461 
8462 	/*
8463 	 * remove the cmd.
8464 	 */
8465 	if (cmd == slots->m_slot[slot]) {
8466 		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8467 		    "0x%x", (void *)cmd, cmd->cmd_flags));
8468 		slots->m_slot[slot] = NULL;
8469 		mpt->m_ncmds--;
8470 
8471 		/*
8472 		 * only decrement per target ncmds if command
8473 		 * has a target associated with it.
8474 		 */
8475 		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8476 			ptgt->m_t_ncmds--;
8477 			/*
8478 			 * reset throttle if we just ran an untagged command
8479 			 * to a tagged target
8480 			 */
8481 			if ((ptgt->m_t_ncmds == 0) &&
8482 			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8483 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8484 			}
8485 
8486 			/*
8487 			 * Remove this command from the active queue.
8488 			 */
8489 			if (cmd->cmd_active_expiration != 0) {
8490 				TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8491 				    cmd_active_link);
8492 				cmd->cmd_active_expiration = 0;
8493 			}
8494 		}
8495 	}
8496 
8497 	/*
8498 	 * This is all we need to do for ioc commands.
8499 	 */
8500 	if (cmd->cmd_flags & CFLAG_CMDIOC) {
8501 		mptsas_return_to_pool(mpt, cmd);
8502 		return;
8503 	}
8504 
8505 	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8506 }
8507 
8508 /*
8509  * Accept all cmds on the tx_waitq, if any, and then
8510  * start a fresh request from the top of the device queue.
8511  *
8512  * Since cmds are usually queued on the tx_waitq and only rarely on the
8513  * instance waitq, this function should not be invoked from the ISR;
8514  * mptsas_restart_waitq() is invoked from the ISR instead.  Otherwise the
8515  * burden of the I/O dispatch CPUs would be moved to the interrupt CPU.
8516  */
8517 static void
8518 mptsas_restart_hba(mptsas_t *mpt)
8519 {
8520 	ASSERT(mutex_owned(&mpt->m_mutex));
8521 
8522 	mutex_enter(&mpt->m_tx_waitq_mutex);
8523 	if (mpt->m_tx_waitq) {
8524 		mptsas_accept_tx_waitq(mpt);
8525 	}
8526 	mutex_exit(&mpt->m_tx_waitq_mutex);
8527 	mptsas_restart_waitq(mpt);
8528 }
8529 
8530 /*
8531  * start a fresh request from the top of the device queue
8532  */
8533 static void
8534 mptsas_restart_waitq(mptsas_t *mpt)
8535 {
8536 	mptsas_cmd_t	*cmd, *next_cmd;
8537 	mptsas_target_t *ptgt = NULL;
8538 
8539 	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8540 
8541 	ASSERT(mutex_owned(&mpt->m_mutex));
8542 
8543 	/*
8544 	 * If there is a reset delay, don't start any cmds.  Otherwise, start
8545 	 * as many cmds as possible.
8546 	 * Since SMID 0 and the TM slot are both reserved, the actual maximum
8547 	 * number of commands is m_max_requests - 2.
8548 	 */
8549 	cmd = mpt->m_waitq;
8550 
8551 	while (cmd != NULL) {
8552 		next_cmd = cmd->cmd_linkp;
8553 		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8554 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8555 				/*
8556 				 * A passthru command that has been given a
8557 				 * slot needs CFLAG_PREPARED set.
8558 				 */
8559 				cmd->cmd_flags |= CFLAG_PREPARED;
8560 				mptsas_waitq_delete(mpt, cmd);
8561 				mptsas_start_passthru(mpt, cmd);
8562 			}
8563 			cmd = next_cmd;
8564 			continue;
8565 		}
8566 		if (cmd->cmd_flags & CFLAG_CONFIG) {
8567 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8568 				/*
8569 				 * Send the config page request and delete it
8570 				 * from the waitq.
8571 				 */
8572 				cmd->cmd_flags |= CFLAG_PREPARED;
8573 				mptsas_waitq_delete(mpt, cmd);
8574 				mptsas_start_config_page_access(mpt, cmd);
8575 			}
8576 			cmd = next_cmd;
8577 			continue;
8578 		}
8579 		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8580 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8581 				/*
8582 				 * Send the FW Diag request and delete it from
8583 				 * the waitq.
8584 				 */
8585 				cmd->cmd_flags |= CFLAG_PREPARED;
8586 				mptsas_waitq_delete(mpt, cmd);
8587 				mptsas_start_diag(mpt, cmd);
8588 			}
8589 			cmd = next_cmd;
8590 			continue;
8591 		}
8592 
8593 		ptgt = cmd->cmd_tgt_addr;
8594 		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8595 		    (ptgt->m_t_ncmds == 0)) {
8596 			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8597 		}
8598 		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8599 		    (ptgt && (ptgt->m_reset_delay == 0)) &&
8600 		    (ptgt && (ptgt->m_t_ncmds <
8601 		    ptgt->m_t_throttle))) {
8602 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8603 				mptsas_waitq_delete(mpt, cmd);
8604 				(void) mptsas_start_cmd(mpt, cmd);
8605 			}
8606 		}
8607 		cmd = next_cmd;
8608 	}
8609 }
8610 /*
8611  * Cmds are queued if tran_start() doesn't get the m_mutex lock (no wait).
8612  * Accept all those queued cmds before accepting a new cmd so that the
8613  * cmds are sent in order.
8614  */
8615 static void
8616 mptsas_accept_tx_waitq(mptsas_t *mpt)
8617 {
8618 	mptsas_cmd_t *cmd;
8619 
8620 	ASSERT(mutex_owned(&mpt->m_mutex));
8621 	ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8622 
8623 	/*
8624 	 * A Bus Reset could occur at any time and flush the tx_waitq,
8625 	 * so we cannot count on the tx_waitq to contain even one cmd.
8626 	 * Also, while the m_tx_waitq_mutex is released to run
8627 	 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8628 	 */
8629 	cmd = mpt->m_tx_waitq;
8630 	for (;;) {
8631 		if ((cmd = mpt->m_tx_waitq) == NULL) {
8632 			mpt->m_tx_draining = 0;
8633 			break;
8634 		}
8635 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8636 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8637 		}
8638 		cmd->cmd_linkp = NULL;
8639 		mutex_exit(&mpt->m_tx_waitq_mutex);
8640 		if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8641 			cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8642 			    "to accept cmd on queue\n");
8643 		mutex_enter(&mpt->m_tx_waitq_mutex);
8644 	}
8645 }
8646 
8647 
8648 /*
8649  * mpt tag type lookup
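 * The index is (cmd_pkt_flags & FLAG_TAGMASK) >> 12; see mptsas_start_cmd().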
8650  */
8651 static char mptsas_tag_lookup[] =
8652 	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8653 
8654 static int
8655 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8656 {
8657 	struct scsi_pkt		*pkt = CMD2PKT(cmd);
8658 	uint32_t		control = 0;
8659 	caddr_t			mem, arsbuf;
8660 	pMpi2SCSIIORequest_t	io_request;
8661 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
8662 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
8663 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
8664 	uint16_t		SMID, io_flags = 0;
8665 	uint8_t			ars_size;
8666 	uint64_t		request_desc;
8667 	uint32_t		ars_dmaaddrlow;
8668 	mptsas_cmd_t		*c;
8669 
8670 	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
8671 	    cmd->cmd_flags));
8672 
8673 	/*
8674 	 * The SMID is the command's pre-assigned slot number.  SMID 0 is
8675 	 * invalid, so slot numbering starts at 1.
8676 	 */
8677 	SMID = cmd->cmd_slot;
8678 
8679 	/*
8680 	 * It is possible for back-to-back device resets to
8681 	 * happen before the reset delay has expired.  That's
8682 	 * ok, just let the device reset go out on the bus.
8683 	 */
8684 	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8685 		ASSERT(ptgt->m_reset_delay == 0);
8686 	}
8687 
8688 	/*
8689 	 * if a non-tagged cmd is submitted to an active tagged target
8690 	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8691 	 * to be untagged
8692 	 */
8693 	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8694 	    (ptgt->m_t_ncmds > 1) &&
8695 	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8696 	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8697 		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8698 			NDBG23(("target=%d, untagged cmd, start draining\n",
8699 			    ptgt->m_devhdl));
8700 
8701 			if (ptgt->m_reset_delay == 0) {
8702 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8703 			}
8704 
8705 			mptsas_remove_cmd(mpt, cmd);
8706 			cmd->cmd_pkt_flags |= FLAG_HEAD;
8707 			mptsas_waitq_add(mpt, cmd);
8708 		}
8709 		return (DDI_FAILURE);
8710 	}
8711 
8712 	/*
8713 	 * Set correct tag bits.
8714 	 */
8715 	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8716 		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8717 		    FLAG_TAGMASK) >> 12)]) {
8718 		case MSG_SIMPLE_QTAG:
8719 			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8720 			break;
8721 		case MSG_HEAD_QTAG:
8722 			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8723 			break;
8724 		case MSG_ORDERED_QTAG:
8725 			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8726 			break;
8727 		default:
8728 			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8729 			break;
8730 		}
8731 	} else {
8732 		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8733 			ptgt->m_t_throttle = 1;
8734 		}
8735 		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8736 	}
8737 
8738 	if (cmd->cmd_pkt_flags & FLAG_TLR) {
8739 		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8740 	}
8741 
8742 	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8743 	io_request = (pMpi2SCSIIORequest_t)mem;
8744 	if (cmd->cmd_extrqslen != 0) {
8745 		/*
8746 		 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
8747 		 * Calculate the DMA address with the same offset.
8748 		 */
8749 		arsbuf = cmd->cmd_arq_buf;
8750 		ars_size = cmd->cmd_extrqslen;
8751 		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
8752 		    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
8753 		    0xffffffffu;
8754 	} else {
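		/*
		 * Use the pre-allocated request sense region: one
		 * m_req_sense_size slice per slot, starting at SMID 1.
		 */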
8755 		arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
8756 		cmd->cmd_arq_buf = arsbuf;
8757 		ars_size = mpt->m_req_sense_size;
8758 		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
8759 		    (mpt->m_req_sense_size * (SMID-1))) &
8760 		    0xffffffffu;
8761 	}
8762 	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8763 	bzero(arsbuf, ars_size);
8764 
8765 	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8766 	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8767 	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8768 	    MPI2_FUNCTION_SCSI_IO_REQUEST);
8769 
8770 	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8771 	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8772 
8773 	io_flags = cmd->cmd_cdblen;
8774 	if (mptsas_use_fastpath &&
8775 	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
8776 		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
8777 		request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
8778 	} else {
8779 		request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8780 	}
8781 	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8782 	/*
8783 	 * setup the Scatter/Gather DMA list for this request
8784 	 */
8785 	if (cmd->cmd_cookiec > 0) {
8786 		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8787 	} else {
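		/*
		 * No data transfer for this command: build a single
		 * zero-length simple SGE marking the end of the list.
		 */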
8788 		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8789 		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8790 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
8791 		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8792 		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8793 	}
8794 
8795 	/*
8796 	 * save ARQ information
8797 	 */
8798 	ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
8799 	ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);
8800 
8801 	ddi_put32(acc_hdl, &io_request->Control, control);
8802 
8803 	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
8804 	    SMID, (void *)io_request, (void *)cmd));
8805 
8806 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8807 	(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
8808 	    DDI_DMA_SYNC_FORDEV);
8809 
8810 	/*
8811 	 * Build request descriptor and write it to the request desc post reg.
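	 * The descriptor flags occupy the low byte, the SMID sits in bits
	 * 16..31, and the target DevHandle in bits 48..63.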
8812 	 */
8813 	request_desc |= (SMID << 16);
8814 	request_desc |= (uint64_t)ptgt->m_devhdl << 48;
8815 	MPTSAS_START_CMD(mpt, request_desc);
8816 
8817 	/*
8818 	 * Start timeout.
8819 	 */
8820 	cmd->cmd_active_expiration =
8821 	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
8822 #ifdef MPTSAS_TEST
8823 	/*
8824 	 * Force timeouts to happen immediately.
8825 	 */
8826 	if (mptsas_test_timeouts)
8827 		cmd->cmd_active_expiration = gethrtime();
8828 #endif
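	/*
	 * Insert this command into the per-target active command queue,
	 * which is kept sorted by expiration time: latest expiration at
	 * the head, earliest at the tail.
	 */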
8829 	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
8830 	if (c == NULL ||
8831 	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
8832 		/*
8833 		 * Common case is that this is the last pending expiration
8834 		 * (or queue is empty). Insert at head of the queue.
8835 		 */
8836 		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
8837 	} else {
8838 		/*
8839 		 * Queue is not empty and first element expires later than
8840 		 * this command. Search for element expiring sooner.
8841 		 */
8842 		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
8843 			if (c->cmd_active_expiration <
8844 			    cmd->cmd_active_expiration) {
8845 				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
8846 				break;
8847 			}
8848 		}
8849 		if (c == NULL) {
8850 			/*
8851 			 * No element found expiring sooner, append to
8852 			 * non-empty queue.
8853 			 */
8854 			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
8855 			    cmd_active_link);
8856 		}
8857 	}
8858 
8859 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8860 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8861 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8862 		return (DDI_FAILURE);
8863 	}
8864 	return (DDI_SUCCESS);
8865 }
8866 
8867 /*
8868  * Select a helper thread to handle current doneq
8869  */
8870 static void
8871 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8872 {
8873 	uint64_t			t, i;
8874 	uint32_t			min = 0xffffffff;
8875 	mptsas_doneq_thread_list_t	*item;
8876 
8877 	for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8878 		item = &mpt->m_doneq_thread_id[i];
8879 		/*
8880 		 * If the number of commands queued on helper thread[i] is
8881 		 * below doneq_thread_threshold, pick thread[i].  Otherwise
8882 		 * pick the thread with the shortest queue.
8883 		 */
8884 
8885 		mutex_enter(&item->mutex);
8886 		if (item->len < mpt->m_doneq_thread_threshold) {
8887 			t = i;
8888 			mutex_exit(&item->mutex);
8889 			break;
8890 		}
8891 		if (item->len < min) {
8892 			min = item->len;
8893 			t = i;
8894 		}
8895 		mutex_exit(&item->mutex);
8896 	}
8897 	mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8898 	mptsas_doneq_mv(mpt, t);
8899 	cv_signal(&mpt->m_doneq_thread_id[t].cv);
8900 	mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8901 }
8902 
8903 /*
8904  * Move the current global doneq to the doneq of thread[t].
8905  */
8906 static void
8907 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8908 {
8909 	mptsas_cmd_t			*cmd;
8910 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
8911 
8912 	ASSERT(mutex_owned(&item->mutex));
8913 	while ((cmd = mpt->m_doneq) != NULL) {
8914 		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8915 			mpt->m_donetail = &mpt->m_doneq;
8916 		}
8917 		cmd->cmd_linkp = NULL;
8918 		*item->donetail = cmd;
8919 		item->donetail = &cmd->cmd_linkp;
8920 		mpt->m_doneq_len--;
8921 		item->len++;
8922 	}
8923 }
8924 
8925 void
8926 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8927 {
8928 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
8929 
8930 	/* Check all acc and dma handles */
8931 	if ((mptsas_check_acc_handle(mpt->m_datap) !=
8932 	    DDI_SUCCESS) ||
8933 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8934 	    DDI_SUCCESS) ||
8935 	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
8936 	    DDI_SUCCESS) ||
8937 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8938 	    DDI_SUCCESS) ||
8939 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8940 	    DDI_SUCCESS) ||
8941 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8942 	    DDI_SUCCESS) ||
8943 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8944 	    DDI_SUCCESS) ||
8945 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
8946 	    DDI_SUCCESS)) {
8947 		ddi_fm_service_impact(mpt->m_dip,
8948 		    DDI_SERVICE_UNAFFECTED);
8949 		ddi_fm_acc_err_clear(mpt->m_config_handle,
8950 		    DDI_FME_VER0);
8951 		pkt->pkt_reason = CMD_TRAN_ERR;
8952 		pkt->pkt_statistics = 0;
8953 	}
8954 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8955 	    DDI_SUCCESS) ||
8956 	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
8957 	    DDI_SUCCESS) ||
8958 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8959 	    DDI_SUCCESS) ||
8960 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8961 	    DDI_SUCCESS) ||
8962 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8963 	    DDI_SUCCESS) ||
8964 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8965 	    DDI_SUCCESS)) {
8966 		ddi_fm_service_impact(mpt->m_dip,
8967 		    DDI_SERVICE_UNAFFECTED);
8968 		pkt->pkt_reason = CMD_TRAN_ERR;
8969 		pkt->pkt_statistics = 0;
8970 	}
8971 	if (cmd->cmd_dmahandle &&
8972 	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8973 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8974 		pkt->pkt_reason = CMD_TRAN_ERR;
8975 		pkt->pkt_statistics = 0;
8976 	}
8977 	if ((cmd->cmd_extra_frames &&
8978 	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8979 	    DDI_SUCCESS) ||
8980 	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8981 	    DDI_SUCCESS)))) {
8982 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8983 		pkt->pkt_reason = CMD_TRAN_ERR;
8984 		pkt->pkt_statistics = 0;
8985 	}
8986 }
8987 
8988 /*
8989  * These routines manipulate the queue of commands that
8990  * are waiting for their completion routines to be called.
8991  * The queue is usually in FIFO order but on an MP system
8992  * it's possible for the completion routines to get out
8993  * of order. If that's a problem you need to add a global
8994  * mutex around the code that calls the completion routine
8995  * in the interrupt handler.
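 * The doneq is a singly-linked list through cmd_linkp; m_donetail points to
 * the last link pointer so that commands can be appended in constant time.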
8996  */
8997 static void
8998 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8999 {
9000 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
9001 
9002 	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
9003 
9004 	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
9005 	cmd->cmd_linkp = NULL;
9006 	cmd->cmd_flags |= CFLAG_FINISHED;
9007 	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
9008 
9009 	mptsas_fma_check(mpt, cmd);
9010 
9011 	/*
9012 	 * Only add scsi pkts that have completion routines to
9013 	 * the doneq.  No-intr cmds do not have callbacks.
9014 	 */
9015 	if (pkt && (pkt->pkt_comp)) {
9016 		*mpt->m_donetail = cmd;
9017 		mpt->m_donetail = &cmd->cmd_linkp;
9018 		mpt->m_doneq_len++;
9019 	}
9020 }
9021 
9022 static mptsas_cmd_t *
9023 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
9024 {
9025 	mptsas_cmd_t			*cmd;
9026 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
9027 
9028 	/* pop one off the done queue */
9029 	if ((cmd = item->doneq) != NULL) {
9030 		/* if the queue is now empty fix the tail pointer */
9031 		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
9032 		if ((item->doneq = cmd->cmd_linkp) == NULL) {
9033 			item->donetail = &item->doneq;
9034 		}
9035 		cmd->cmd_linkp = NULL;
9036 		item->len--;
9037 	}
9038 	return (cmd);
9039 }
9040 
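/*
 * Drain the instance doneq, calling each completed command's completion
 * routine with m_mutex dropped.  The m_in_callback flag prevents this
 * routine from being re-entered while completion callbacks are running.
 */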
9041 static void
9042 mptsas_doneq_empty(mptsas_t *mpt)
9043 {
9044 	if (mpt->m_doneq && !mpt->m_in_callback) {
9045 		mptsas_cmd_t	*cmd, *next;
9046 		struct scsi_pkt *pkt;
9047 
9048 		mpt->m_in_callback = 1;
9049 		cmd = mpt->m_doneq;
9050 		mpt->m_doneq = NULL;
9051 		mpt->m_donetail = &mpt->m_doneq;
9052 		mpt->m_doneq_len = 0;
9053 
9054 		mutex_exit(&mpt->m_mutex);
9055 		/*
9056 		 * run the completion routines of all the
9057 		 * completed commands
9058 		 */
9059 		while (cmd != NULL) {
9060 			next = cmd->cmd_linkp;
9061 			cmd->cmd_linkp = NULL;
9062 			/* run this command's completion routine */
9063 			cmd->cmd_flags |= CFLAG_COMPLETED;
9064 			pkt = CMD2PKT(cmd);
9065 			mptsas_pkt_comp(pkt, cmd);
9066 			cmd = next;
9067 		}
9068 		mutex_enter(&mpt->m_mutex);
9069 		mpt->m_in_callback = 0;
9070 	}
9071 }
9072 
9073 /*
9074  * These routines manipulate the target's queue of pending requests
9075  */
9076 void
9077 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
9078 {
9079 	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
9080 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9081 	cmd->cmd_queued = TRUE;
9082 	if (ptgt)
9083 		ptgt->m_t_nwait++;
9084 	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
9085 		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
9086 			mpt->m_waitqtail = &cmd->cmd_linkp;
9087 		}
9088 		mpt->m_waitq = cmd;
9089 	} else {
9090 		cmd->cmd_linkp = NULL;
9091 		*(mpt->m_waitqtail) = cmd;
9092 		mpt->m_waitqtail = &cmd->cmd_linkp;
9093 	}
9094 }
9095 
9096 static mptsas_cmd_t *
9097 mptsas_waitq_rm(mptsas_t *mpt)
9098 {
9099 	mptsas_cmd_t	*cmd;
9100 	mptsas_target_t *ptgt;
9101 	NDBG7(("mptsas_waitq_rm"));
9102 
9103 	MPTSAS_WAITQ_RM(mpt, cmd);
9104 
9105 	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
9106 	if (cmd) {
9107 		ptgt = cmd->cmd_tgt_addr;
9108 		if (ptgt) {
9109 			ptgt->m_t_nwait--;
9110 			ASSERT(ptgt->m_t_nwait >= 0);
9111 		}
9112 	}
9113 	return (cmd);
9114 }
9115 
9116 /*
9117  * remove specified cmd from the middle of the wait queue.
9118  */
9119 static void
9120 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9121 {
9122 	mptsas_cmd_t	*prevp = mpt->m_waitq;
9123 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9124 
9125 	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9126 	    (void *)mpt, (void *)cmd));
9127 	if (ptgt) {
9128 		ptgt->m_t_nwait--;
9129 		ASSERT(ptgt->m_t_nwait >= 0);
9130 	}
9131 
9132 	if (prevp == cmd) {
9133 		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
9134 			mpt->m_waitqtail = &mpt->m_waitq;
9135 
9136 		cmd->cmd_linkp = NULL;
9137 		cmd->cmd_queued = FALSE;
9138 		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9139 		    (void *)mpt, (void *)cmd));
9140 		return;
9141 	}
9142 
9143 	while (prevp != NULL) {
9144 		if (prevp->cmd_linkp == cmd) {
9145 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9146 				mpt->m_waitqtail = &prevp->cmd_linkp;
9147 
9148 			cmd->cmd_linkp = NULL;
9149 			cmd->cmd_queued = FALSE;
9150 			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9151 			    (void *)mpt, (void *)cmd));
9152 			return;
9153 		}
9154 		prevp = prevp->cmd_linkp;
9155 	}
9156 	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
9157 }
9158 
9159 static mptsas_cmd_t *
9160 mptsas_tx_waitq_rm(mptsas_t *mpt)
9161 {
9162 	mptsas_cmd_t *cmd;
9163 	NDBG7(("mptsas_tx_waitq_rm"));
9164 
9165 	MPTSAS_TX_WAITQ_RM(mpt, cmd);
9166 
9167 	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
9168 
9169 	return (cmd);
9170 }
9171 
9172 /*
9173  * remove specified cmd from the middle of the tx_waitq.
9174  */
9175 static void
9176 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9177 {
9178 	mptsas_cmd_t *prevp = mpt->m_tx_waitq;
9179 
9180 	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9181 	    (void *)mpt, (void *)cmd));
9182 
9183 	if (prevp == cmd) {
9184 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
9185 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
9186 
9187 		cmd->cmd_linkp = NULL;
9188 		cmd->cmd_queued = FALSE;
9189 		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9190 		    (void *)mpt, (void *)cmd));
9191 		return;
9192 	}
9193 
9194 	while (prevp != NULL) {
9195 		if (prevp->cmd_linkp == cmd) {
9196 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9197 				mpt->m_tx_waitqtail = &prevp->cmd_linkp;
9198 
9199 			cmd->cmd_linkp = NULL;
9200 			cmd->cmd_queued = FALSE;
9201 			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9202 			    (void *)mpt, (void *)cmd));
9203 			return;
9204 		}
9205 		prevp = prevp->cmd_linkp;
9206 	}
9207 	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
9208 }
9209 
9210 /*
9211  * device and bus reset handling
9212  *
9213  * Notes:
9214  *	- RESET_ALL:	reset the controller
9215  *	- RESET_TARGET:	reset the target specified in scsi_address
9216  */
9217 static int
9218 mptsas_scsi_reset(struct scsi_address *ap, int level)
9219 {
9220 	mptsas_t		*mpt = ADDR2MPT(ap);
9221 	int			rval;
9222 	mptsas_tgt_private_t	*tgt_private;
9223 	mptsas_target_t		*ptgt = NULL;
9224 
9225 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
9226 	ptgt = tgt_private->t_private;
9227 	if (ptgt == NULL) {
9228 		return (FALSE);
9229 	}
9230 	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
9231 	    level));
9232 
9233 	mutex_enter(&mpt->m_mutex);
9234 	/*
9235 	 * if we are not in panic set up a reset delay for this target
9236 	 */
9237 	if (!ddi_in_panic()) {
9238 		mptsas_setup_bus_reset_delay(mpt);
9239 	} else {
9240 		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
9241 	}
9242 	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
9243 	mutex_exit(&mpt->m_mutex);
9244 
9245 	/*
9246 	 * The transport layer expects to see only TRUE and
9247 	 * FALSE. Therefore, we will adjust the return value
9248 	 * if mptsas_do_scsi_reset returns FAILED.
9249 	 */
9250 	if (rval == FAILED)
9251 		rval = FALSE;
9252 	return (rval);
9253 }
9254 
9255 static int
9256 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
9257 {
9258 	int		rval = FALSE;
9259 	uint8_t		config, disk;
9260 
9261 	ASSERT(mutex_owned(&mpt->m_mutex));
9262 
9263 	if (mptsas_debug_resets) {
9264 		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
9265 		    devhdl);
9266 	}
9267 
9268 	/*
9269 	 * Issue a Target Reset message to the target specified but not to a
9270 	 * disk making up a raid volume.  Just look through the RAID config
9271 	 * Phys Disk list of DevHandles.  If the target's DevHandle is in this
9272 	 * list, then don't reset this target.
9273 	 */
9274 	for (config = 0; config < mpt->m_num_raid_configs; config++) {
9275 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9276 			if (devhdl == mpt->m_raidconfig[config].
9277 			    m_physdisk_devhdl[disk]) {
9278 				return (TRUE);
9279 			}
9280 		}
9281 	}
9282 
9283 	rval = mptsas_ioc_task_management(mpt,
9284 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9285 
9286 	mptsas_doneq_empty(mpt);
9287 	return (rval);
9288 }
9289 
9290 static int
9291 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
9292     void (*callback)(caddr_t), caddr_t arg)
9293 {
9294 	mptsas_t	*mpt = ADDR2MPT(ap);
9295 
9296 	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
9297 
9298 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9299 	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
9300 }
9301 
9302 static int
9303 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9304 {
9305 	dev_info_t	*lun_dip = NULL;
9306 
9307 	ASSERT(sd != NULL);
9308 	ASSERT(name != NULL);
9309 	lun_dip = sd->sd_dev;
9310 	ASSERT(lun_dip != NULL);
9311 
9312 	if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9313 		return (1);
9314 	} else {
9315 		return (0);
9316 	}
9317 }
9318 
9319 static int
9320 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
9321 {
9322 	return (mptsas_get_name(sd, name, len));
9323 }
9324 
9325 void
9326 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9327 {
9328 
9329 	NDBG25(("mptsas_set_throttle: throttle=%x", what));
9330 
9331 	/*
9332 	 * if the bus is draining/quiesced, no changes to the throttles
9333 	 * are allowed. Not allowing change of throttles during draining
9334 	 * limits error recovery but will reduce draining time
9335 	 *
9336 	 * all throttles should have been set to HOLD_THROTTLE
9337 	 */
9338 	if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9339 		return;
9340 	}
9341 
9342 	if (what == HOLD_THROTTLE) {
9343 		ptgt->m_t_throttle = HOLD_THROTTLE;
9344 	} else if (ptgt->m_reset_delay == 0) {
9345 		ptgt->m_t_throttle = what;
9346 	}
9347 }
9348 
9349 /*
9350  * Clean up from a device reset.
9351  * For the case of target reset, this function clears the waitq of all
9352  * commands for a particular target.  For the case of abort task set, this
9353  * function clears the waitq of all commands for a particular target/lun.
9354  */
9355 static void
9356 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9357 {
9358 	mptsas_slots_t	*slots = mpt->m_active;
9359 	mptsas_cmd_t	*cmd, *next_cmd;
9360 	int		slot;
9361 	uchar_t		reason;
9362 	uint_t		stat;
9363 	hrtime_t	timestamp;
9364 
9365 	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
9366 
9367 	timestamp = gethrtime();
9368 
9369 	/*
9370 	 * Make sure the I/O Controller has flushed all cmds
9371 	 * that are associated with this target for a target reset
9372 	 * and target/lun for abort task set.
9373 	 * Account for TM requests, which use the last SMID.
9374 	 */
9375 	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9376 		if ((cmd = slots->m_slot[slot]) == NULL)
9377 			continue;
9378 		reason = CMD_RESET;
9379 		stat = STAT_DEV_RESET;
9380 		switch (tasktype) {
9381 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9382 			if (Tgt(cmd) == target) {
9383 				if (cmd->cmd_active_expiration <= timestamp) {
9384 					/*
9385 					 * When timeout requested, propagate
9386 					 * proper reason and statistics to
9387 					 * target drivers.
9388 					 */
9389 					reason = CMD_TIMEOUT;
9390 					stat |= STAT_TIMEOUT;
9391 				}
9392 				NDBG25(("mptsas_flush_target discovered non-"
9393 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
9394 				    tasktype));
9395 				mptsas_dump_cmd(mpt, cmd);
9396 				mptsas_remove_cmd(mpt, cmd);
9397 				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
9398 				mptsas_doneq_add(mpt, cmd);
9399 			}
9400 			break;
9401 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9402 			reason = CMD_ABORTED;
9403 			stat = STAT_ABORTED;
9404 			/*FALLTHROUGH*/
9405 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9406 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9407 
9408 				NDBG25(("mptsas_flush_target discovered non-"
9409 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
9410 				    tasktype));
9411 				mptsas_dump_cmd(mpt, cmd);
9412 				mptsas_remove_cmd(mpt, cmd);
9413 				mptsas_set_pkt_reason(mpt, cmd, reason,
9414 				    stat);
9415 				mptsas_doneq_add(mpt, cmd);
9416 			}
9417 			break;
9418 		default:
9419 			break;
9420 		}
9421 	}
9422 
9423 	/*
9424 	 * Flush the waitq and tx_waitq of this target's cmds
9425 	 */
9426 	cmd = mpt->m_waitq;
9427 
9428 	reason = CMD_RESET;
9429 	stat = STAT_DEV_RESET;
9430 
9431 	switch (tasktype) {
9432 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9433 		while (cmd != NULL) {
9434 			next_cmd = cmd->cmd_linkp;
9435 			if (Tgt(cmd) == target) {
9436 				mptsas_waitq_delete(mpt, cmd);
9437 				mptsas_set_pkt_reason(mpt, cmd,
9438 				    reason, stat);
9439 				mptsas_doneq_add(mpt, cmd);
9440 			}
9441 			cmd = next_cmd;
9442 		}
9443 		mutex_enter(&mpt->m_tx_waitq_mutex);
9444 		cmd = mpt->m_tx_waitq;
9445 		while (cmd != NULL) {
9446 			next_cmd = cmd->cmd_linkp;
9447 			if (Tgt(cmd) == target) {
9448 				mptsas_tx_waitq_delete(mpt, cmd);
9449 				mutex_exit(&mpt->m_tx_waitq_mutex);
9450 				mptsas_set_pkt_reason(mpt, cmd,
9451 				    reason, stat);
9452 				mptsas_doneq_add(mpt, cmd);
9453 				mutex_enter(&mpt->m_tx_waitq_mutex);
9454 			}
9455 			cmd = next_cmd;
9456 		}
9457 		mutex_exit(&mpt->m_tx_waitq_mutex);
9458 		break;
9459 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9460 		reason = CMD_ABORTED;
9461 		stat =  STAT_ABORTED;
9462 		/*FALLTHROUGH*/
9463 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9464 		while (cmd != NULL) {
9465 			next_cmd = cmd->cmd_linkp;
9466 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9467 				mptsas_waitq_delete(mpt, cmd);
9468 				mptsas_set_pkt_reason(mpt, cmd,
9469 				    reason, stat);
9470 				mptsas_doneq_add(mpt, cmd);
9471 			}
9472 			cmd = next_cmd;
9473 		}
9474 		mutex_enter(&mpt->m_tx_waitq_mutex);
9475 		cmd = mpt->m_tx_waitq;
9476 		while (cmd != NULL) {
9477 			next_cmd = cmd->cmd_linkp;
9478 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9479 				mptsas_tx_waitq_delete(mpt, cmd);
9480 				mutex_exit(&mpt->m_tx_waitq_mutex);
9481 				mptsas_set_pkt_reason(mpt, cmd,
9482 				    reason, stat);
9483 				mptsas_doneq_add(mpt, cmd);
9484 				mutex_enter(&mpt->m_tx_waitq_mutex);
9485 			}
9486 			cmd = next_cmd;
9487 		}
9488 		mutex_exit(&mpt->m_tx_waitq_mutex);
9489 		break;
9490 	default:
9491 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9492 		    tasktype);
9493 		break;
9494 	}
9495 }
9496 
9497 /*
9498  * Clean up HBA state, abort all outstanding commands and commands on the
9499  * waitq, and reset the timeouts of all targets.
9500  */
9501 static void
9502 mptsas_flush_hba(mptsas_t *mpt)
9503 {
9504 	mptsas_slots_t	*slots = mpt->m_active;
9505 	mptsas_cmd_t	*cmd;
9506 	int		slot;
9507 
9508 	NDBG25(("mptsas_flush_hba"));
9509 
9510 	/*
9511 	 * The I/O Controller should have already sent back
9512 	 * all commands via the scsi I/O reply frame.  Make
9513 	 * sure all commands have been flushed.
9514 	 * Account for TM requests, which use the last SMID.
9515 	 */
9516 	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9517 		if ((cmd = slots->m_slot[slot]) == NULL)
9518 			continue;
9519 
9520 		if (cmd->cmd_flags & CFLAG_CMDIOC) {
9521 			/*
9522 			 * Need to make sure to tell everyone that might be
9523 			 * waiting on this command that it's going to fail.  If
9524 			 * we get here, this command will never timeout because
9525 			 * the active command table is going to be re-allocated,
9526 			 * so there will be nothing to check against a time out.
9527 			 * Instead, mark the command as failed due to reset.
9528 			 */
9529 			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9530 			    STAT_BUS_RESET);
9531 			if ((cmd->cmd_flags &
9532 			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
9533 				cmd->cmd_flags |= CFLAG_FINISHED;
9534 				cv_broadcast(&mpt->m_passthru_cv);
9535 				cv_broadcast(&mpt->m_config_cv);
9536 				cv_broadcast(&mpt->m_fw_diag_cv);
9537 			}
9538 			continue;
9539 		}
9540 
9541 		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9542 		    slot));
9543 		mptsas_dump_cmd(mpt, cmd);
9544 
9545 		mptsas_remove_cmd(mpt, cmd);
9546 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9547 		mptsas_doneq_add(mpt, cmd);
9548 	}
9549 
9550 	/*
9551 	 * Flush the waitq.
9552 	 */
9553 	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9554 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9555 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9556 		    (cmd->cmd_flags & CFLAG_CONFIG) ||
9557 		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9558 			cmd->cmd_flags |= CFLAG_FINISHED;
9559 			cv_broadcast(&mpt->m_passthru_cv);
9560 			cv_broadcast(&mpt->m_config_cv);
9561 			cv_broadcast(&mpt->m_fw_diag_cv);
9562 		} else {
9563 			mptsas_doneq_add(mpt, cmd);
9564 		}
9565 	}
9566 
9567 	/*
9568 	 * Flush the tx_waitq
9569 	 */
9570 	mutex_enter(&mpt->m_tx_waitq_mutex);
9571 	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
9572 		mutex_exit(&mpt->m_tx_waitq_mutex);
9573 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9574 		mptsas_doneq_add(mpt, cmd);
9575 		mutex_enter(&mpt->m_tx_waitq_mutex);
9576 	}
9577 	mutex_exit(&mpt->m_tx_waitq_mutex);
9578 
9579 	/*
9580 	 * Drain the taskqs prior to reallocating resources. The thread
9581 	 * passing through here could be launched from either (dr)
9582 	 * or (event) taskqs so only wait on the 'other' queue since
9583 	 * waiting on 'this' queue is a deadlock condition.
9584 	 */
9585 	mutex_exit(&mpt->m_mutex);
9586 	if (!taskq_member((taskq_t *)mpt->m_event_taskq, curthread))
9587 		ddi_taskq_wait(mpt->m_event_taskq);
9588 	if (!taskq_member((taskq_t *)mpt->m_dr_taskq, curthread))
9589 		ddi_taskq_wait(mpt->m_dr_taskq);
9590 
9591 	mutex_enter(&mpt->m_mutex);
9592 }
9593 
9594 /*
9595  * set pkt_reason and OR in pkt_statistics flag
9596  */
9597 static void
9598 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9599     uint_t stat)
9600 {
9601 #ifndef __lock_lint
9602 	_NOTE(ARGUNUSED(mpt))
9603 #endif
9604 
9605 	NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9606 	    (void *)cmd, reason, stat));
9607 
9608 	if (cmd) {
9609 		if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9610 			cmd->cmd_pkt->pkt_reason = reason;
9611 		}
9612 		cmd->cmd_pkt->pkt_statistics |= stat;
9613 	}
9614 }
9615 
9616 static void
9617 mptsas_start_watch_reset_delay()
9618 {
9619 	NDBG22(("mptsas_start_watch_reset_delay"));
9620 
9621 	mutex_enter(&mptsas_global_mutex);
9622 	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9623 		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9624 		    drv_usectohz((clock_t)
9625 		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9626 		ASSERT(mptsas_reset_watch != NULL);
9627 	}
9628 	mutex_exit(&mptsas_global_mutex);
9629 }
9630 
9631 static void
9632 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9633 {
9634 	mptsas_target_t	*ptgt = NULL;
9635 
9636 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
9637 
9638 	NDBG22(("mptsas_setup_bus_reset_delay"));
9639 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9640 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
9641 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9642 		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9643 	}
9644 
9645 	mptsas_start_watch_reset_delay();
9646 }
9647 
9648 /*
9649  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9650  * mpt instance for active reset delays
9651  */
9652 static void
9653 mptsas_watch_reset_delay(void *arg)
9654 {
9655 #ifndef __lock_lint
9656 	_NOTE(ARGUNUSED(arg))
9657 #endif
9658 
9659 	mptsas_t	*mpt;
9660 	int		not_done = 0;
9661 
9662 	NDBG22(("mptsas_watch_reset_delay"));
9663 
9664 	mutex_enter(&mptsas_global_mutex);
9665 	mptsas_reset_watch = 0;
9666 	mutex_exit(&mptsas_global_mutex);
9667 	rw_enter(&mptsas_global_rwlock, RW_READER);
9668 	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9669 		if (mpt->m_tran == 0) {
9670 			continue;
9671 		}
9672 		mutex_enter(&mpt->m_mutex);
9673 		not_done += mptsas_watch_reset_delay_subr(mpt);
9674 		mutex_exit(&mpt->m_mutex);
9675 	}
9676 	rw_exit(&mptsas_global_rwlock);
9677 
9678 	if (not_done) {
9679 		mptsas_start_watch_reset_delay();
9680 	}
9681 }
9682 
9683 static int
9684 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9685 {
9686 	int		done = 0;
9687 	int		restart = 0;
9688 	mptsas_target_t	*ptgt = NULL;
9689 
9690 	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9691 
9692 	ASSERT(mutex_owned(&mpt->m_mutex));
9693 
9694 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9695 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
9696 		if (ptgt->m_reset_delay != 0) {
9697 			ptgt->m_reset_delay -=
9698 			    MPTSAS_WATCH_RESET_DELAY_TICK;
9699 			if (ptgt->m_reset_delay <= 0) {
9700 				ptgt->m_reset_delay = 0;
9701 				mptsas_set_throttle(mpt, ptgt,
9702 				    MAX_THROTTLE);
9703 				restart++;
9704 			} else {
9705 				done = -1;
9706 			}
9707 		}
9708 	}
9709 
9710 	if (restart > 0) {
9711 		mptsas_restart_hba(mpt);
9712 	}
9713 	return (done);
9714 }
9715 
9716 #ifdef MPTSAS_TEST
9717 static void
9718 mptsas_test_reset(mptsas_t *mpt, int target)
9719 {
9720 	mptsas_target_t    *ptgt = NULL;
9721 
9722 	if (mptsas_rtest == target) {
9723 		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9724 			mptsas_rtest = -1;
9725 		}
9726 		if (mptsas_rtest == -1) {
9727 			NDBG22(("mptsas_test_reset success"));
9728 		}
9729 	}
9730 }
9731 #endif
9732 
9733 /*
9734  * abort handling:
9735  *
9736  * Notes:
9737  *	- if pkt is not NULL, abort just that command
9738  *	- if pkt is NULL, abort all outstanding commands for target
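 *	- queued cmds are flushed locally with CMD_ABORTED; cmds already
 *	  issued to the IOC are aborted via a task management request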
9739  */
9740 static int
9741 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9742 {
9743 	mptsas_t		*mpt = ADDR2MPT(ap);
9744 	int			rval;
9745 	mptsas_tgt_private_t	*tgt_private;
9746 	int			target, lun;
9747 
9748 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9749 	    tran_tgt_private;
9750 	ASSERT(tgt_private != NULL);
9751 	target = tgt_private->t_private->m_devhdl;
9752 	lun = tgt_private->t_lun;
9753 
9754 	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9755 
9756 	mutex_enter(&mpt->m_mutex);
9757 	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9758 	mutex_exit(&mpt->m_mutex);
9759 	return (rval);
9760 }
9761 
9762 static int
9763 mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
9764 {
9765 	mptsas_cmd_t	*sp = NULL;
9766 	mptsas_slots_t	*slots = mpt->m_active;
9767 	int		rval = FALSE;
9768 
9769 	ASSERT(mutex_owned(&mpt->m_mutex));
9770 
9771 	/*
9772 	 * Abort the command pkt on the target/lun in ap.  If pkt is
9773 	 * NULL, abort all outstanding commands on that target/lun.
9774 	 * If you can abort them, return 1, else return 0.
9775 	 * Each packet that's aborted should be sent back to the target
9776 	 * driver through the callback routine, with pkt_reason set to
9777 	 * CMD_ABORTED.
9778 	 *
9779 	 * abort cmd pkt on HBA hardware; clean out of outstanding
9780 	 * command lists, etc.
9781 	 */
9782 	if (pkt != NULL) {
9783 		/* abort the specified packet */
9784 		sp = PKT2CMD(pkt);
9785 
9786 		if (sp->cmd_queued) {
9787 			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9788 			    (void *)sp));
9789 			mptsas_waitq_delete(mpt, sp);
9790 			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9791 			    STAT_ABORTED);
9792 			mptsas_doneq_add(mpt, sp);
9793 			rval = TRUE;
9794 			goto done;
9795 		}
9796 
9797 		/*
9798 		 * Have mpt firmware abort this command
9799 		 */
9800 
9801 		if (slots->m_slot[sp->cmd_slot] != NULL) {
9802 			rval = mptsas_ioc_task_management(mpt,
9803 			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9804 			    lun, NULL, 0, 0);
9805 
9806 			/*
9807 			 * The transport layer expects only TRUE and FALSE.
9808 			 * Therefore, if mptsas_ioc_task_management returns
9809 			 * FAILED we will return FALSE.
9810 			 */
9811 			if (rval == FAILED)
9812 				rval = FALSE;
9813 			goto done;
9814 		}
9815 	}
9816 
9817 	/*
9818 	 * If pkt is NULL then abort task set
9819 	 */
9820 	rval = mptsas_ioc_task_management(mpt,
9821 	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
9822 
9823 	/*
9824 	 * The transport layer expects only TRUE and FALSE.
9825 	 * Therefore, if mptsas_ioc_task_management returns
9826 	 * FAILED we will return FALSE.
9827 	 */
9828 	if (rval == FAILED)
9829 		rval = FALSE;
9830 
9831 #ifdef MPTSAS_TEST
9832 	if (rval && mptsas_test_stop) {
9833 		debug_enter("mptsas_do_scsi_abort");
9834 	}
9835 #endif
9836 
9837 done:
9838 	mptsas_doneq_empty(mpt);
9839 	return (rval);
9840 }
9841 
9842 /*
9843  * capability handling:
9844  * (*tran_getcap).  Get the capability named, and return its value.
9845  */
9846 static int
9847 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9848 {
9849 	mptsas_t	*mpt = ADDR2MPT(ap);
9850 	int		ckey;
9851 	int		rval = FALSE;
9852 
9853 	NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9854 	    ap->a_target, cap, tgtonly));
9855 
9856 	mutex_enter(&mpt->m_mutex);
9857 
9858 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9859 		mutex_exit(&mpt->m_mutex);
9860 		return (UNDEFINED);
9861 	}
9862 
9863 	switch (ckey) {
9864 	case SCSI_CAP_DMA_MAX:
9865 		rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9866 		break;
9867 	case SCSI_CAP_ARQ:
9868 		rval = TRUE;
9869 		break;
9870 	case SCSI_CAP_MSG_OUT:
9871 	case SCSI_CAP_PARITY:
9872 	case SCSI_CAP_UNTAGGED_QING:
9873 		rval = TRUE;
9874 		break;
9875 	case SCSI_CAP_TAGGED_QING:
9876 		rval = TRUE;
9877 		break;
9878 	case SCSI_CAP_RESET_NOTIFICATION:
9879 		rval = TRUE;
9880 		break;
9881 	case SCSI_CAP_LINKED_CMDS:
9882 		rval = FALSE;
9883 		break;
9884 	case SCSI_CAP_QFULL_RETRIES:
9885 		rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9886 		    tran_tgt_private))->t_private->m_qfull_retries;
9887 		break;
9888 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
9889 		rval = drv_hztousec(((mptsas_tgt_private_t *)
9890 		    (ap->a_hba_tran->tran_tgt_private))->
9891 		    t_private->m_qfull_retry_interval) / 1000;
9892 		break;
9893 	case SCSI_CAP_CDB_LEN:
9894 		rval = CDB_GROUP4;
9895 		break;
9896 	case SCSI_CAP_INTERCONNECT_TYPE:
9897 		rval = INTERCONNECT_SAS;
9898 		break;
9899 	case SCSI_CAP_TRAN_LAYER_RETRIES:
9900 		if (mpt->m_ioc_capabilities &
9901 		    MPI2_IOCFACTS_CAPABILITY_TLR)
9902 			rval = TRUE;
9903 		else
9904 			rval = FALSE;
9905 		break;
9906 	default:
9907 		rval = UNDEFINED;
9908 		break;
9909 	}
9910 
9911 	NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9912 
9913 	mutex_exit(&mpt->m_mutex);
9914 	return (rval);
9915 }
9916 
9917 /*
9918  * (*tran_setcap).  Set the capability named to the value given.
9919  */
9920 static int
9921 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9922 {
9923 	mptsas_t	*mpt = ADDR2MPT(ap);
9924 	int		ckey;
9925 	int		rval = FALSE;
9926 
9927 	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9928 	    ap->a_target, cap, value, tgtonly));
9929 
9930 	if (!tgtonly) {
9931 		return (rval);
9932 	}
9933 
9934 	mutex_enter(&mpt->m_mutex);
9935 
9936 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9937 		mutex_exit(&mpt->m_mutex);
9938 		return (UNDEFINED);
9939 	}
9940 
9941 	switch (ckey) {
9942 	case SCSI_CAP_DMA_MAX:
9943 	case SCSI_CAP_MSG_OUT:
9944 	case SCSI_CAP_PARITY:
9945 	case SCSI_CAP_INITIATOR_ID:
9946 	case SCSI_CAP_LINKED_CMDS:
9947 	case SCSI_CAP_UNTAGGED_QING:
9948 	case SCSI_CAP_RESET_NOTIFICATION:
9949 		/*
9950 		 * None of these are settable via
9951 		 * the capability interface.
9952 		 */
9953 		break;
9954 	case SCSI_CAP_ARQ:
9955 		/*
9956 		 * We cannot turn off arq so return false if asked to
9957 		 */
9958 		if (value) {
9959 			rval = TRUE;
9960 		} else {
9961 			rval = FALSE;
9962 		}
9963 		break;
9964 	case SCSI_CAP_TAGGED_QING:
9965 		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9966 		    (ap->a_hba_tran->tran_tgt_private))->t_private,
9967 		    MAX_THROTTLE);
9968 		rval = TRUE;
9969 		break;
9970 	case SCSI_CAP_QFULL_RETRIES:
9971 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9972 		    t_private->m_qfull_retries = (uchar_t)value;
9973 		rval = TRUE;
9974 		break;
9975 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
9976 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9977 		    t_private->m_qfull_retry_interval =
9978 		    drv_usectohz(value * 1000);
9979 		rval = TRUE;
9980 		break;
9981 	default:
9982 		rval = UNDEFINED;
9983 		break;
9984 	}
9985 	mutex_exit(&mpt->m_mutex);
9986 	return (rval);
9987 }
9988 
9989 /*
9990  * Utility routine for mptsas_ifsetcap/ifgetcap
9991  */
9992 /*ARGSUSED*/
9993 static int
9994 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9995 {
9996 	NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9997 
9998 	if (!cap)
9999 		return (FALSE);
10000 
10001 	*cidxp = scsi_hba_lookup_capstr(cap);
10002 	return (TRUE);
10003 }
10004 
10005 static int
10006 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
10007 {
10008 	mptsas_slots_t	*old_active = mpt->m_active;
10009 	mptsas_slots_t	*new_active;
10010 	size_t		size;
10011 
10012 	/*
10013 	 * if there are active commands, then we cannot
10014 	 * change size of active slots array.
10015 	 */
10016 	ASSERT(mpt->m_ncmds == 0);
10017 
10018 	size = MPTSAS_SLOTS_SIZE(mpt);
10019 	new_active = kmem_zalloc(size, flag);
10020 	if (new_active == NULL) {
10021 		NDBG1(("new active alloc failed"));
10022 		return (-1);
10023 	}
10024 	/*
10025 	 * Since SMID 0 is reserved and the TM slot is reserved, the
10026 	 * number of slots that can be used at any one time is
10027 	 * m_max_requests - 2.
10028 	 */
10029 	new_active->m_n_normal = (mpt->m_max_requests - 2);
10030 	new_active->m_size = size;
10031 	new_active->m_rotor = 1;
10032 	if (old_active)
10033 		mptsas_free_active_slots(mpt);
10034 	mpt->m_active = new_active;
10035 
10036 	return (0);
10037 }
10038 
10039 static void
10040 mptsas_free_active_slots(mptsas_t *mpt)
10041 {
10042 	mptsas_slots_t	*active = mpt->m_active;
10043 	size_t		size;
10044 
10045 	if (active == NULL)
10046 		return;
10047 	size = active->m_size;
10048 	kmem_free(active, size);
10049 	mpt->m_active = NULL;
10050 }
10051 
10052 /*
10053  * Error logging, printing, and debug print routines.
10054  */
10055 static char *mptsas_label = "mpt_sas";
10056 
10057 /*PRINTFLIKE3*/
10058 void
10059 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
10060 {
10061 	dev_info_t	*dev;
10062 	va_list		ap;
10063 
10064 	if (mpt) {
10065 		dev = mpt->m_dip;
10066 	} else {
10067 		dev = 0;
10068 	}
10069 
10070 	mutex_enter(&mptsas_log_mutex);
10071 
10072 	va_start(ap, fmt);
10073 	(void) vsprintf(mptsas_log_buf, fmt, ap);
10074 	va_end(ap);
10075 
10076 	if (level == CE_CONT) {
10077 		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
10078 	} else {
10079 		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
10080 	}
10081 
10082 	mutex_exit(&mptsas_log_mutex);
10083 }
10084 
10085 #ifdef MPTSAS_DEBUG
10086 /*
10087  * Use a circular buffer to log messages to private memory.
10088  * Increment idx atomically to minimize the risk of missing lines.
10089  * It's fast and does not hold up the proceedings too much.
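 * Note: the index mask below assumes MPTSAS_DBGLOG_LINECNT is a power of two.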
10090  */
10091 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
10092 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
10093 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
10094 static uint32_t mptsas_dbglog_idx = 0;
10095 
10096 /*PRINTFLIKE1*/
10097 void
10098 mptsas_debug_log(char *fmt, ...)
10099 {
10100 	va_list		ap;
10101 	uint32_t	idx;
10102 
10103 	idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
10104 	    (mptsas_dbglog_linecnt - 1);
10105 
10106 	va_start(ap, fmt);
10107 	(void) vsnprintf(mptsas_dbglog_bufs[idx],
10108 	    mptsas_dbglog_linelen, fmt, ap);
10109 	va_end(ap);
10110 }
10111 
10112 /*PRINTFLIKE1*/
10113 void
10114 mptsas_printf(char *fmt, ...)
10115 {
10116 	dev_info_t	*dev = 0;
10117 	va_list		ap;
10118 
10119 	mutex_enter(&mptsas_log_mutex);
10120 
10121 	va_start(ap, fmt);
10122 	(void) vsprintf(mptsas_log_buf, fmt, ap);
10123 	va_end(ap);
10124 
10125 #ifdef PROM_PRINTF
10126 	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
10127 #else
10128 	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
10129 #endif
10130 	mutex_exit(&mptsas_log_mutex);
10131 }
10132 #endif
10133 
10134 /*
10135  * timeout handling
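 * mptsas_watch() runs periodically via timeout(9F): it checks each instance
 * for a firmware FAULT state, runs mptsas_watchsubr() to find timed-out
 * commands, and reschedules itself while timeouts remain enabled.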
10136  */
10137 static void
10138 mptsas_watch(void *arg)
10139 {
10140 #ifndef __lock_lint
10141 	_NOTE(ARGUNUSED(arg))
10142 #endif
10143 
10144 	mptsas_t	*mpt;
10145 	uint32_t	doorbell;
10146 
10147 	NDBG30(("mptsas_watch"));
10148 
10149 	rw_enter(&mptsas_global_rwlock, RW_READER);
10150 	for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10151 
10152 		mutex_enter(&mpt->m_mutex);
10153 
10154 		/* Skip device if not powered on */
10155 		if (mpt->m_options & MPTSAS_OPT_PM) {
10156 			if (mpt->m_power_level == PM_LEVEL_D0) {
10157 				(void) pm_busy_component(mpt->m_dip, 0);
10158 				mpt->m_busy = 1;
10159 			} else {
10160 				mutex_exit(&mpt->m_mutex);
10161 				continue;
10162 			}
10163 		}
10164 
10165 		/*
10166 		 * Check if controller is in a FAULT state. If so, reset it.
10167 		 */
10168 		doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
10169 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
10170 			doorbell &= MPI2_DOORBELL_DATA_MASK;
10171 			mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10172 			    "code: %04x", doorbell);
10173 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10174 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10175 				mptsas_log(mpt, CE_WARN, "Reset failed "
10176 				    "after fault was detected");
10177 			}
10178 		}
10179 
10180 		/*
10181 		 * For now, always call mptsas_watchsubr.
10182 		 */
10183 		mptsas_watchsubr(mpt);
10184 
10185 		if (mpt->m_options & MPTSAS_OPT_PM) {
10186 			mpt->m_busy = 0;
10187 			(void) pm_idle_component(mpt->m_dip, 0);
10188 		}
10189 
10190 		mutex_exit(&mpt->m_mutex);
10191 	}
10192 	rw_exit(&mptsas_global_rwlock);
10193 
10194 	mutex_enter(&mptsas_global_mutex);
10195 	if (mptsas_timeouts_enabled)
10196 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10197 	mutex_exit(&mptsas_global_mutex);
10198 }
10199 
10200 static void
10201 mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
10202 {
10203 	mptsas_cmd_t	*cmd;
10204 
10205 	/*
10206 	 * If we were draining due to a qfull condition,
10207 	 * go back to full throttle.
10208 	 */
10209 	if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
10210 	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
10211 	    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
10212 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10213 		mptsas_restart_hba(mpt);
10214 	}
10215 
10216 	cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
10217 	if (cmd == NULL)
10218 		return;
10219 
10220 	if (cmd->cmd_active_expiration <= timestamp) {
10221 		/*
10222 		 * Earliest command timeout expired. Drain throttle.
10223 		 */
10224 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
10225 
10226 		/*
10227 		 * Check for remaining commands.
10228 		 */
10229 		cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
10230 		if (cmd->cmd_active_expiration > timestamp) {
10231 			/*
10232 			 * Wait for remaining commands to complete or
10233 			 * time out.
10234 			 */
10235 			NDBG23(("command timed out, pending drain"));
10236 			return;
10237 		}
10238 
10239 		/*
10240 		 * All command timeouts expired.
10241 		 */
10242 		mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
10243 		    "expired with %d commands on target %d lun %d.",
10244 		    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
10245 		    ptgt->m_devhdl, Lun(cmd));
10246 
10247 		mptsas_cmd_timeout(mpt, ptgt);
10248 	} else if (cmd->cmd_active_expiration <=
10249 	    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
10250 		NDBG23(("pending timeout"));
10251 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
10252 	}
10253 }
10254 
10255 static void
10256 mptsas_watchsubr(mptsas_t *mpt)
10257 {
10258 	int		i;
10259 	mptsas_cmd_t	*cmd;
10260 	mptsas_target_t	*ptgt = NULL;
10261 	hrtime_t	timestamp = gethrtime();
10262 
10263 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
10264 
10265 	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));
10266 
10267 #ifdef MPTSAS_TEST
10268 	if (mptsas_enable_untagged) {
10269 		mptsas_test_untagged++;
10270 	}
10271 #endif
10272 
10273 	/*
10274 	 * Check for commands stuck in active slot
10275 	 * Account for TM requests, which use the last SMID.
10276 	 */
10277 	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
10278 		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
10279 			if (cmd->cmd_active_expiration <= timestamp) {
10280 				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
10281 					/*
10282 					 * There seems to be a command stuck
10283 					 * in the active slot.  Drain throttle.
10284 					 */
10285 					mptsas_set_throttle(mpt,
10286 					    cmd->cmd_tgt_addr,
10287 					    DRAIN_THROTTLE);
10288 				} else if (cmd->cmd_flags &
10289 				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
10290 				    CFLAG_FW_DIAG)) {
10291 					/*
10292 					 * passthrough command timeout
10293 					 */
10294 					cmd->cmd_flags |= (CFLAG_FINISHED |
10295 					    CFLAG_TIMEOUT);
10296 					cv_broadcast(&mpt->m_passthru_cv);
10297 					cv_broadcast(&mpt->m_config_cv);
10298 					cv_broadcast(&mpt->m_fw_diag_cv);
10299 				}
10300 			}
10301 		}
10302 	}
10303 
10304 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10305 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10306 		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
10307 	}
10308 
10309 	for (ptgt = refhash_first(mpt->m_tmp_targets); ptgt != NULL;
10310 	    ptgt = refhash_next(mpt->m_tmp_targets, ptgt)) {
10311 		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
10312 	}
10313 }
10314 
10315 /*
10316  * timeout recovery
10317  */
10318 static void
10319 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
10320 {
10321 	uint16_t	devhdl;
10322 	uint64_t	sas_wwn;
10323 	uint8_t		phy;
10324 	char		wwn_str[MPTSAS_WWN_STRLEN];
10325 
10326 	devhdl = ptgt->m_devhdl;
10327 	sas_wwn = ptgt->m_addr.mta_wwn;
10328 	phy = ptgt->m_phynum;
10329 	if (sas_wwn == 0) {
10330 		(void) sprintf(wwn_str, "p%x", phy);
10331 	} else {
10332 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
10333 	}
10334 
10335 	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10336 	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10337 	    "target %d %s, enclosure %u", devhdl, wwn_str,
10338 	    ptgt->m_enclosure);
10339 
10340 	/*
10341 	 * Abort all outstanding commands on the device.
10342 	 */
10343 	NDBG29(("mptsas_cmd_timeout: device reset"));
10344 	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10345 		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10346 		    "recovery failed!", devhdl);
10347 	}
10348 }
10349 
10350 /*
10351  * Device / Hotplug control
10352  */
10353 static int
10354 mptsas_scsi_quiesce(dev_info_t *dip)
10355 {
10356 	mptsas_t	*mpt;
10357 	scsi_hba_tran_t	*tran;
10358 
10359 	tran = ddi_get_driver_private(dip);
10360 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10361 		return (-1);
10362 
10363 	return (mptsas_quiesce_bus(mpt));
10364 }
10365 
10366 static int
10367 mptsas_scsi_unquiesce(dev_info_t *dip)
10368 {
10369 	mptsas_t		*mpt;
10370 	scsi_hba_tran_t	*tran;
10371 
10372 	tran = ddi_get_driver_private(dip);
10373 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10374 		return (-1);
10375 
10376 	return (mptsas_unquiesce_bus(mpt));
10377 }
10378 
10379 static int
10380 mptsas_quiesce_bus(mptsas_t *mpt)
10381 {
10382 	mptsas_target_t	*ptgt = NULL;
10383 
10384 	NDBG28(("mptsas_quiesce_bus"));
10385 	mutex_enter(&mpt->m_mutex);
10386 
10387 	/* Set all the throttles to zero */
10388 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10389 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10390 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10391 	}
10392 
10393 	/* If there are any outstanding commands in the queue */
10394 	if (mpt->m_ncmds) {
10395 		mpt->m_softstate |= MPTSAS_SS_DRAINING;
10396 		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10397 		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
10398 		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
10399 			/*
10400 			 * Quiesce has been interrupted
10401 			 */
10402 			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
10403 			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10404 			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10405 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10406 			}
10407 			mptsas_restart_hba(mpt);
10408 			if (mpt->m_quiesce_timeid != 0) {
10409 				timeout_id_t tid = mpt->m_quiesce_timeid;
10410 				mpt->m_quiesce_timeid = 0;
10411 				mutex_exit(&mpt->m_mutex);
10412 				(void) untimeout(tid);
10413 				return (-1);
10414 			}
10415 			mutex_exit(&mpt->m_mutex);
10416 			return (-1);
10417 		} else {
10418 			/* Bus has been quiesced */
10419 			ASSERT(mpt->m_quiesce_timeid == 0);
10420 			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
10421 			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
10422 			mutex_exit(&mpt->m_mutex);
10423 			return (0);
10424 		}
10425 	}
10426 	/* Bus was not busy - QUIESCED */
10427 	mutex_exit(&mpt->m_mutex);
10428 
10429 	return (0);
10430 }
10431 
10432 static int
10433 mptsas_unquiesce_bus(mptsas_t *mpt)
10434 {
10435 	mptsas_target_t	*ptgt = NULL;
10436 
10437 	NDBG28(("mptsas_unquiesce_bus"));
10438 	mutex_enter(&mpt->m_mutex);
10439 	mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10440 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10441 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10442 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10443 	}
10444 	mptsas_restart_hba(mpt);
10445 	mutex_exit(&mpt->m_mutex);
10446 	return (0);
10447 }
10448 
10449 static void
10450 mptsas_ncmds_checkdrain(void *arg)
10451 {
10452 	mptsas_t	*mpt = arg;
10453 	mptsas_target_t	*ptgt = NULL;
10454 
10455 	mutex_enter(&mpt->m_mutex);
10456 	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10457 		mpt->m_quiesce_timeid = 0;
10458 		if (mpt->m_ncmds == 0) {
10459 			/* Command queue has been drained */
10460 			cv_signal(&mpt->m_cv);
10461 		} else {
10462 			/*
10463 			 * The throttle may have been reset because
10464 			 * of a SCSI bus reset
10465 			 */
10466 			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10467 			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10468 				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10469 			}
10470 
10471 			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10472 			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
10473 			    drv_usectohz(1000000)));
10474 		}
10475 	}
10476 	mutex_exit(&mpt->m_mutex);
10477 }
10478 
10479 /*ARGSUSED*/
10480 static void
10481 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10482 {
10483 	int	i;
10484 	uint8_t	*cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10485 	char	buf[128];
10486 
10487 	buf[0] = '\0';
10488 	NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10489 	    Tgt(cmd), Lun(cmd)));
10490 	(void) sprintf(&buf[0], "\tcdb=[");
10491 	for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10492 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10493 	}
10494 	(void) sprintf(&buf[strlen(buf)], " ]");
10495 	NDBG25(("?%s\n", buf));
10496 	NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10497 	    cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10498 	    cmd->cmd_pkt->pkt_state));
10499 	NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10500 	    *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10501 }
10502 
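/*
 * Build the MPI2 simple 64-bit SGEs for a passthru request: one optional
 * SGE for the data-out buffer followed by the final SGE for the data-in
 * buffer, which also terminates the list.
 */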
10503 static void
10504 mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10505     pMpi2SGESimple64_t sgep)
10506 {
10507 	uint32_t		sge_flags;
10508 	uint32_t		data_size, dataout_size;
10509 	ddi_dma_cookie_t	data_cookie;
10510 	ddi_dma_cookie_t	dataout_cookie;
10511 
10512 	data_size = pt->data_size;
10513 	dataout_size = pt->dataout_size;
10514 	data_cookie = pt->data_cookie;
10515 	dataout_cookie = pt->dataout_cookie;
10516 
10517 	if (dataout_size) {
10518 		sge_flags = dataout_size |
10519 		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10520 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
10521 		    MPI2_SGE_FLAGS_HOST_TO_IOC |
10522 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10523 		    MPI2_SGE_FLAGS_SHIFT);
10524 		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10525 		ddi_put32(acc_hdl, &sgep->Address.Low,
10526 		    (uint32_t)(dataout_cookie.dmac_laddress &
10527 		    0xffffffffull));
10528 		ddi_put32(acc_hdl, &sgep->Address.High,
10529 		    (uint32_t)(dataout_cookie.dmac_laddress
10530 		    >> 32));
10531 		sgep++;
10532 	}
10533 	sge_flags = data_size;
10534 	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10535 	    MPI2_SGE_FLAGS_LAST_ELEMENT |
10536 	    MPI2_SGE_FLAGS_END_OF_BUFFER |
10537 	    MPI2_SGE_FLAGS_END_OF_LIST |
10538 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10539 	    MPI2_SGE_FLAGS_SHIFT);
10540 	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10541 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10542 		    MPI2_SGE_FLAGS_SHIFT);
10543 	} else {
10544 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10545 		    MPI2_SGE_FLAGS_SHIFT);
10546 	}
10547 	ddi_put32(acc_hdl, &sgep->FlagsLength,
10548 	    sge_flags);
10549 	ddi_put32(acc_hdl, &sgep->Address.Low,
10550 	    (uint32_t)(data_cookie.dmac_laddress &
10551 	    0xffffffffull));
10552 	ddi_put32(acc_hdl, &sgep->Address.High,
10553 	    (uint32_t)(data_cookie.dmac_laddress >> 32));
10554 }
10555 
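/*
 * Build the IEEE simple 64-bit SGEs for a passthru request on MPI2.5
 * (SAS3) controllers: one optional SGE for the data-out buffer followed
 * by the final SGE for the data-in buffer, flagged as end-of-list.
 */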
10556 static void
10557 mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10558     pMpi2IeeeSgeSimple64_t ieeesgep)
10559 {
10560 	uint8_t			sge_flags;
10561 	uint32_t		data_size, dataout_size;
10562 	ddi_dma_cookie_t	data_cookie;
10563 	ddi_dma_cookie_t	dataout_cookie;
10564 
10565 	data_size = pt->data_size;
10566 	dataout_size = pt->dataout_size;
10567 	data_cookie = pt->data_cookie;
10568 	dataout_cookie = pt->dataout_cookie;
10569 
10570 	sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10571 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10572 	if (dataout_size) {
10573 		ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10574 		ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10575 		    (uint32_t)(dataout_cookie.dmac_laddress &
10576 		    0xffffffffull));
10577 		ddi_put32(acc_hdl, &ieeesgep->Address.High,
10578 		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10579 		ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10580 		ieeesgep++;
10581 	}
10582 	sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10583 	ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10584 	ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10585 	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10586 	ddi_put32(acc_hdl, &ieeesgep->Address.High,
10587 	    (uint32_t)(data_cookie.dmac_laddress >> 32));
10588 	ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10589 }
10590 
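/*
 * Copy a prepared passthru request into the request frame for the
 * command's slot, append the SGE(s) for any data buffers, set up the
 * sense buffer and request descriptor for SCSI IO requests, and hand the
 * request to the IOC.
 */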
10591 static void
10592 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10593 {
10594 	caddr_t			memp;
10595 	pMPI2RequestHeader_t	request_hdrp;
10596 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
10597 	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
10598 	uint32_t		request_size;
10599 	uint32_t		i;
10600 	uint64_t		request_desc = 0;
10601 	uint8_t			desc_type;
10602 	uint16_t		SMID;
10603 	uint8_t			*request, function;
10604 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
10605 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
10606 
10607 	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10608 
10609 	request = pt->request;
10610 	request_size = pt->request_size;
10611 
10612 	SMID = cmd->cmd_slot;
10613 
10614 	/*
10615 	 * Store the passthru message in the memory location
10616 	 * corresponding to our slot number
10617 	 */
10618 	memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10619 	request_hdrp = (pMPI2RequestHeader_t)memp;
10620 	bzero(memp, mpt->m_req_frame_size);
10621 
10622 	for (i = 0; i < request_size; i++) {
10623 		bcopy(request + i, memp + i, 1);
10624 	}
10625 
10626 	NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10627 	    "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10628 	    request_hdrp->MsgFlags, request_size,
10629 	    pt->data_size, pt->dataout_size, SMID));
10630 
10631 	/*
10632 	 * Add an SGE, even if the length is zero.
10633 	 */
10634 	if (mpt->m_MPI25 && pt->simple == 0) {
10635 		mptsas_passthru_ieee_sge(acc_hdl, pt,
10636 		    (pMpi2IeeeSgeSimple64_t)
10637 		    ((uint8_t *)request_hdrp + pt->sgl_offset));
10638 	} else {
10639 		mptsas_passthru_sge(acc_hdl, pt,
10640 		    (pMpi2SGESimple64_t)
10641 		    ((uint8_t *)request_hdrp + pt->sgl_offset));
10642 	}
10643 
10644 	function = request_hdrp->Function;
10645 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10646 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10647 		pMpi2SCSIIORequest_t	scsi_io_req;
10648 		caddr_t			arsbuf;
10649 		uint8_t			ars_size;
10650 		uint32_t		ars_dmaaddrlow;
10651 
10652 		NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10653 		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10654 
10655 		if (cmd->cmd_extrqslen != 0) {
10656 			/*
10657 			 * Mapping of the buffer was done in
10658 			 * mptsas_do_passthru().
10659 			 * Calculate the DMA address with the same offset.
10660 			 */
10661 			arsbuf = cmd->cmd_arq_buf;
10662 			ars_size = cmd->cmd_extrqslen;
10663 			ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10664 			    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10665 			    0xffffffffu;
10666 		} else {
10667 			arsbuf = mpt->m_req_sense +
10668 			    (mpt->m_req_sense_size * (SMID-1));
10669 			cmd->cmd_arq_buf = arsbuf;
10670 			ars_size = mpt->m_req_sense_size;
10671 			ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10672 			    (mpt->m_req_sense_size * (SMID-1))) &
10673 			    0xffffffffu;
10674 		}
10675 		bzero(arsbuf, ars_size);
10676 
10677 		ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10678 		ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10679 		    ars_dmaaddrlow);
10680 
10681 		/*
10682 		 * Put the SGEs for the data and data_out buffers at the
10683 		 * end of the scsi_io_request message header (64 bytes in
10684 		 * total) and set the SGLOffset0 value.
10685 		 */
10686 		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10687 		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10688 
10689 		/*
10690 		 * Setup descriptor info.  RAID passthrough must use the
10691 		 * default request descriptor which is already set, so if this
10692 		 * is a SCSI IO request, change the descriptor to SCSI IO.
10693 		 */
10694 		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10695 			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10696 			request_desc = ((uint64_t)ddi_get16(acc_hdl,
10697 			    &scsi_io_req->DevHandle) << 48);
10698 		}
10699 		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10700 		    DDI_DMA_SYNC_FORDEV);
10701 	}
10702 
10703 	/*
10704 	 * We must wait until this message has been completed before
10705 	 * beginning the next one, so wait for this one to
10706 	 * finish.
10707 	 */
10708 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10709 	request_desc |= (SMID << 16) + desc_type;
10710 	cmd->cmd_rfm = 0;
10711 	MPTSAS_START_CMD(mpt, request_desc);
10712 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10713 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10714 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10715 	}
10716 }
10717 
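/*
 * Per-function preparation routines used by mptsas_prep_sgl_offset() to
 * validate a user passthru request and compute its SGL offset.
 */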
10718 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10719 static mptsas_pre_f	mpi_pre_ioc_facts;
10720 static mptsas_pre_f	mpi_pre_port_facts;
10721 static mptsas_pre_f	mpi_pre_fw_download;
10722 static mptsas_pre_f	mpi_pre_fw_25_download;
10723 static mptsas_pre_f	mpi_pre_fw_upload;
10724 static mptsas_pre_f	mpi_pre_fw_25_upload;
10725 static mptsas_pre_f	mpi_pre_sata_passthrough;
10726 static mptsas_pre_f	mpi_pre_smp_passthrough;
10727 static mptsas_pre_f	mpi_pre_config;
10728 static mptsas_pre_f	mpi_pre_sas_io_unit_control;
10729 static mptsas_pre_f	mpi_pre_scsi_io_req;
10730 
10731 /*
10732  * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10733  */
10734 static void
10735 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10736 {
10737 	pMpi2FWDownloadTCSGE_t tcsge;
10738 	pMpi2FWDownloadRequest req;
10739 
10740 	/*
10741 	 * If SAS3, call separate function.
10742 	 */
10743 	if (mpt->m_MPI25) {
10744 		mpi_pre_fw_25_download(mpt, pt);
10745 		return;
10746 	}
10747 
10748 	/*
10749 	 * User requests should come in with the Transaction
10750 	 * context element where the SGL will go. Putting the
10751 	 * SGL after that seems to work, though it isn't clear
10752 	 * why. Other drivers tend to create an extra SGL and
10753 	 * refer to the TCE through that.
10754 	 */
10755 	req = (pMpi2FWDownloadRequest)pt->request;
10756 	tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10757 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10758 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10759 		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10760 	}
10761 
10762 	pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10763 	    sizeof (*tcsge);
10764 	if (pt->request_size != pt->sgl_offset) {
10765 		NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10766 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10767 		    (int)pt->request_size, (int)pt->sgl_offset,
10768 		    (int)pt->dataout_size));
10769 	}
10770 	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10771 		NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10772 		    "0x%x, should be 0x%x", pt->data_size,
10773 		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10774 	}
10775 }
10776 
10777 /*
10778  * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10779  */
10780 static void
10781 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10782 {
10783 	pMpi2FWDownloadTCSGE_t tcsge;
10784 	pMpi2FWDownloadRequest req2;
10785 	pMpi25FWDownloadRequest req25;
10786 
10787 	/*
10788 	 * User requests should come in with the Transaction
10789 	 * context element where the SGL will go. The new firmware
10790 	 * doesn't use a TCE and has space in the main request for
10791 	 * this information, so move it to the right place.
10792 	 */
10793 	req2 = (pMpi2FWDownloadRequest)pt->request;
10794 	req25 = (pMpi25FWDownloadRequest)pt->request;
10795 	tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10796 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10797 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10798 		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10799 	}
10800 	req25->ImageOffset = tcsge->ImageOffset;
10801 	req25->ImageSize = tcsge->ImageSize;
10802 
10803 	pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10804 	if (pt->request_size != pt->sgl_offset) {
10805 		NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10806 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10807 		    pt->request_size, pt->sgl_offset,
10808 		    pt->dataout_size));
10809 	}
10810 	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10811 		NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10812 		    "0x%x, should be 0x%x", pt->data_size,
10813 		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10814 	}
10815 }
10816 
10817 /*
10818  * Prepare the pt for a SAS2 FW_UPLOAD request.
10819  */
10820 static void
10821 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10822 {
10823 	pMpi2FWUploadTCSGE_t tcsge;
10824 	pMpi2FWUploadRequest_t req;
10825 
10826 	/*
10827 	 * If SAS3, call separate function.
10828 	 */
10829 	if (mpt->m_MPI25) {
10830 		mpi_pre_fw_25_upload(mpt, pt);
10831 		return;
10832 	}
10833 
10834 	/*
10835 	 * User requests should come in with the Transaction
10836 	 * context element where the SGL will go. Putting the
10837 	 * SGL after that seems to work, though it isn't clear
10838 	 * why. Other drivers tend to create an extra SGL and
10839 	 * refer to the TCE through that.
10840 	 */
10841 	req = (pMpi2FWUploadRequest_t)pt->request;
10842 	tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10843 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10844 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10845 		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10846 	}
10847 
10848 	pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10849 	    sizeof (*tcsge);
10850 	if (pt->request_size != pt->sgl_offset) {
10851 		NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10852 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10853 		    pt->request_size, pt->sgl_offset,
10854 		    pt->dataout_size));
10855 	}
10856 	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10857 		NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10858 		    "0x%x, should be 0x%x", pt->data_size,
10859 		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10860 	}
10861 }
10862 
10863 /*
10864  * Prepare the pt for a SAS3 FW_UPLOAD request.
10865  */
10866 static void
10867 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10868 {
10869 	pMpi2FWUploadTCSGE_t tcsge;
10870 	pMpi2FWUploadRequest_t req2;
10871 	pMpi25FWUploadRequest_t req25;
10872 
10873 	/*
10874 	 * User requests should come in with the Transaction
10875 	 * context element where the SGL will go. The new firmware
10876 	 * doesn't use a TCE and has space in the main request for
10877 	 * this information, so move it to the right place.
10878 	 */
10879 	req2 = (pMpi2FWUploadRequest_t)pt->request;
10880 	req25 = (pMpi25FWUploadRequest_t)pt->request;
10881 	tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10882 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10883 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10884 		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10885 	}
10886 	req25->ImageOffset = tcsge->ImageOffset;
10887 	req25->ImageSize = tcsge->ImageSize;
10888 
10889 	pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10890 	if (pt->request_size != pt->sgl_offset) {
10891 		NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10892 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10893 		    pt->request_size, pt->sgl_offset,
10894 		    pt->dataout_size));
10895 	}
10896 	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10897 		NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10898 		    "0x%x, should be 0x%x", pt->data_size,
10899 		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10900 	}
10901 }
10902 
10903 /*
10904  * Prepare the pt for an IOC_FACTS request.
10905  */
10906 static void
10907 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10908 {
10909 #ifndef __lock_lint
10910 	_NOTE(ARGUNUSED(mpt))
10911 #endif
10912 	if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST)) {
10913 		NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10914 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10915 		    pt->request_size,
10916 		    (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10917 		    pt->dataout_size));
10918 	}
10919 	if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY)) {
10920 		NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10921 		    "0x%x, should be 0x%x", pt->data_size,
10922 		    (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10923 	}
10924 	pt->sgl_offset = (uint16_t)pt->request_size;
10925 }
10926 
10927 /*
10928  * Prepare the pt for a PORT_FACTS request.
10929  */
10930 static void
10931 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10932 {
10933 #ifndef __lock_lint
10934 	_NOTE(ARGUNUSED(mpt))
10935 #endif
10936 	if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST)) {
10937 		NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10938 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10939 		    pt->request_size,
10940 		    (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10941 		    pt->dataout_size));
10942 	}
10943 	if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY)) {
10944 		NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10945 		    "0x%x, should be 0x%x", pt->data_size,
10946 		    (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10947 	}
10948 	pt->sgl_offset = (uint16_t)pt->request_size;
10949 }
10950 
10951 /*
10952  * Prepare pt for a SATA_PASSTHROUGH request.
10953  */
10954 static void
10955 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10956 {
10957 #ifndef __lock_lint
10958 	_NOTE(ARGUNUSED(mpt))
10959 #endif
10960 	pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10961 	if (pt->request_size != pt->sgl_offset) {
10962 		NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10963 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10964 		    pt->request_size, pt->sgl_offset,
10965 		    pt->dataout_size));
10966 	}
10967 	if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY)) {
10968 		NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10969 		    "0x%x, should be 0x%x", pt->data_size,
10970 		    (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10971 	}
10972 }
10973 
10974 static void
10975 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10976 {
10977 #ifndef __lock_lint
10978 	_NOTE(ARGUNUSED(mpt))
10979 #endif
10980 	pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10981 	if (pt->request_size != pt->sgl_offset) {
10982 		NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10983 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10984 		    pt->request_size, pt->sgl_offset,
10985 		    pt->dataout_size));
10986 	}
10987 	if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY)) {
10988 		NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10989 		    "0x%x, should be 0x%x", pt->data_size,
10990 		    (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10991 	}
10992 }
10993 
10994 /*
10995  * Prepare pt for a CONFIG request.
10996  */
10997 static void
10998 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10999 {
11000 #ifndef __lock_lint
11001 	_NOTE(ARGUNUSED(mpt))
11002 #endif
11003 	pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
11004 	if (pt->request_size != pt->sgl_offset) {
11005 		NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
11006 		    "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11007 		    pt->sgl_offset, pt->dataout_size));
11008 	}
11009 	if (pt->data_size != sizeof (MPI2_CONFIG_REPLY)) {
11010 		NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11011 		    "should be 0x%x", pt->data_size,
11012 		    (int)sizeof (MPI2_CONFIG_REPLY)));
11013 	}
11014 	pt->simple = 1;
11015 }
11016 
11017 /*
11018  * Prepare pt for a SCSI_IO_REQ request.
11019  */
11020 static void
11021 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
11022 {
11023 #ifndef __lock_lint
11024 	_NOTE(ARGUNUSED(mpt))
11025 #endif
11026 	pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
11027 	if (pt->request_size != pt->sgl_offset) {
11028 		NDBG15(("mpi_pre_scsi_io_req(): Incorrect req size, 0x%x, "
11029 		    "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11030 		    pt->sgl_offset,
11031 		    pt->dataout_size));
11032 	}
11033 	if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY)) {
11034 		NDBG15(("mpi_pre_scsi_io_req(): Incorrect rep size, 0x%x, "
11035 		    "should be 0x%x", pt->data_size,
11036 		    (int)sizeof (MPI2_SCSI_IO_REPLY)));
11037 	}
11038 }
11039 
11040 /*
11041  * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
11042  */
11043 static void
11044 mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
11045 {
11046 #ifndef __lock_lint
11047 	_NOTE(ARGUNUSED(mpt))
11048 #endif
11049 	pt->sgl_offset = (uint16_t)pt->request_size;
11050 }
11051 
11052 /*
11053  * A set of functions to prepare an mptsas_cmd for the various
11054  * supported requests.
11055  */
11056 static struct mptsas_func {
11057 	U8		Function;
11058 	char		*Name;
11059 	mptsas_pre_f	*f_pre;
11060 } mptsas_func_list[] = {
11061 	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS",		mpi_pre_ioc_facts },
11062 	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS",	mpi_pre_port_facts },
11063 	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD",	mpi_pre_fw_download },
11064 	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD",		mpi_pre_fw_upload },
11065 	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
11066 	    mpi_pre_sata_passthrough },
11067 	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
11068 	    mpi_pre_smp_passthrough},
11069 	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
11070 	    mpi_pre_scsi_io_req},
11071 	{ MPI2_FUNCTION_CONFIG, "CONFIG",		mpi_pre_config},
11072 	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
11073 	    mpi_pre_sas_io_unit_control },
11074 	{ 0xFF, NULL,				NULL } /* list end */
11075 };
11076 
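/*
 * Find the preparation routine for the request's MPI function in
 * mptsas_func_list and use it to set sgl_offset; for unknown functions
 * fall back to placing the SGL at the end of the request.
 */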
11077 static void
11078 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
11079 {
11080 	pMPI2RequestHeader_t	hdr;
11081 	struct mptsas_func	*f;
11082 
11083 	hdr = (pMPI2RequestHeader_t)pt->request;
11084 
11085 	for (f = mptsas_func_list; f->f_pre != NULL; f++) {
11086 		if (hdr->Function == f->Function) {
11087 			f->f_pre(mpt, pt);
11088 			NDBG15(("mptsas_prep_sgl_offset: Function %s,"
11089 			    " sgl_offset 0x%x", f->Name,
11090 			    pt->sgl_offset));
11091 			return;
11092 		}
11093 	}
11094 	NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
11095 	    " returning req_size 0x%x for sgl_offset",
11096 	    hdr->Function, pt->request_size));
11097 	pt->sgl_offset = (uint16_t)pt->request_size;
11098 }
11099 
11100 
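/*
 * Execute a user passthru request: copy in the request, allocate and fill
 * any DMA buffers, issue the command and wait for it to finish, then copy
 * the reply, sense and read data back out to the caller.
 */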
11101 static int
11102 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
11103     uint8_t *data, uint32_t request_size, uint32_t reply_size,
11104     uint32_t data_size, uint32_t direction, uint8_t *dataout,
11105     uint32_t dataout_size, short timeout, int mode)
11106 {
11107 	mptsas_pt_request_t		pt;
11108 	mptsas_dma_alloc_state_t	data_dma_state;
11109 	mptsas_dma_alloc_state_t	dataout_dma_state;
11110 	caddr_t				memp;
11111 	mptsas_cmd_t			*cmd = NULL;
11112 	struct scsi_pkt			*pkt;
11113 	uint32_t			reply_len = 0, sense_len = 0;
11114 	pMPI2RequestHeader_t		request_hdrp;
11115 	pMPI2RequestHeader_t		request_msg;
11116 	pMPI2DefaultReply_t		reply_msg;
11117 	Mpi2SCSIIOReply_t		rep_msg;
11118 	int				rvalue;
11119 	int				i, status = 0, pt_flags = 0, rv = 0;
11120 	uint8_t				function;
11121 
11122 	ASSERT(mutex_owned(&mpt->m_mutex));
11123 
11124 	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
11125 	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
11126 	request_msg = kmem_zalloc(request_size, KM_SLEEP);
11127 
11128 	mutex_exit(&mpt->m_mutex);
11129 	/*
11130 	 * Copy in the request buffer since it could be used by
11131 	 * another thread while the passthru request is on the waitq.
11132 	 */
11133 	if (ddi_copyin(request, request_msg, request_size, mode)) {
11134 		mutex_enter(&mpt->m_mutex);
11135 		status = EFAULT;
11136 		mptsas_log(mpt, CE_WARN, "failed to copy request data");
11137 		goto out;
11138 	}
11139 	NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
11140 	    mode, request_size, request_msg->Function));
11141 	mutex_enter(&mpt->m_mutex);
11142 
11143 	function = request_msg->Function;
11144 	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
11145 		pMpi2SCSITaskManagementRequest_t	task;
11146 		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
11147 		mptsas_setup_bus_reset_delay(mpt);
11148 		rv = mptsas_ioc_task_management(mpt, task->TaskType,
11149 		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
11150 		    mode);
11151 
11152 		if (rv != TRUE) {
11153 			status = EIO;
11154 			mptsas_log(mpt, CE_WARN, "task management failed");
11155 		}
11156 		goto out;
11157 	}
11158 
11159 	if (data_size != 0) {
11160 		data_dma_state.size = data_size;
11161 		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
11162 			status = ENOMEM;
11163 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
11164 			    "resource");
11165 			goto out;
11166 		}
11167 		pt_flags |= MPTSAS_DATA_ALLOCATED;
11168 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
11169 			mutex_exit(&mpt->m_mutex);
11170 			for (i = 0; i < data_size; i++) {
11171 				if (ddi_copyin(data + i, (uint8_t *)
11172 				    data_dma_state.memp + i, 1, mode)) {
11173 					mutex_enter(&mpt->m_mutex);
11174 					status = EFAULT;
11175 					mptsas_log(mpt, CE_WARN, "failed to "
11176 					    "copy read data");
11177 					goto out;
11178 				}
11179 			}
11180 			mutex_enter(&mpt->m_mutex);
11181 		}
11182 	} else {
11183 		bzero(&data_dma_state, sizeof (data_dma_state));
11184 	}
11185 
11186 	if (dataout_size != 0) {
11187 		dataout_dma_state.size = dataout_size;
11188 		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
11189 			status = ENOMEM;
11190 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
11191 			    "resource");
11192 			goto out;
11193 		}
11194 		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
11195 		mutex_exit(&mpt->m_mutex);
11196 		for (i = 0; i < dataout_size; i++) {
11197 			if (ddi_copyin(dataout + i, (uint8_t *)
11198 			    dataout_dma_state.memp + i, 1, mode)) {
11199 				mutex_enter(&mpt->m_mutex);
11200 				mptsas_log(mpt, CE_WARN, "failed to copy out"
11201 				    " data");
11202 				status = EFAULT;
11203 				goto out;
11204 			}
11205 		}
11206 		mutex_enter(&mpt->m_mutex);
11207 	} else {
11208 		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
11209 	}
11210 
11211 	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11212 		status = EAGAIN;
11213 		mptsas_log(mpt, CE_NOTE, "command pool is full: passthru request");
11214 		goto out;
11215 	}
11216 	pt_flags |= MPTSAS_REQUEST_POOL_CMD;
11217 
11218 	bzero((caddr_t)cmd, sizeof (*cmd));
11219 	bzero((caddr_t)pkt, scsi_pkt_size());
11220 	bzero((caddr_t)&pt, sizeof (pt));
11221 
11222 	cmd->ioc_cmd_slot = (uint32_t)(rvalue);
11223 
11224 	pt.request = (uint8_t *)request_msg;
11225 	pt.direction = direction;
11226 	pt.simple = 0;
11227 	pt.request_size = request_size;
11228 	pt.data_size = data_size;
11229 	pt.dataout_size = dataout_size;
11230 	pt.data_cookie = data_dma_state.cookie;
11231 	pt.dataout_cookie = dataout_dma_state.cookie;
11232 	mptsas_prep_sgl_offset(mpt, &pt);
11233 
11234 	/*
11235 	 * Form a blank cmd/pkt to carry the passthru request
11236 	 */
11237 	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
11238 	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
11239 	pkt->pkt_ha_private	= (opaque_t)&pt;
11240 	pkt->pkt_flags		= FLAG_HEAD;
11241 	pkt->pkt_time		= timeout;
11242 	cmd->cmd_pkt		= pkt;
11243 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;
11244 
11245 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
11246 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
11247 		uint8_t			com, cdb_group_id;
11248 		boolean_t		ret;
11249 
11250 		pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
11251 		com = pkt->pkt_cdbp[0];
11252 		cdb_group_id = CDB_GROUPID(com);
11253 		switch (cdb_group_id) {
11254 		case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
11255 		case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
11256 		case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
11257 		case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
11258 		case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
11259 		default:
11260 			NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
11261 			    "CDBGROUP 0x%x requested!", cdb_group_id));
11262 			break;
11263 		}
11264 
11265 		reply_len = sizeof (MPI2_SCSI_IO_REPLY);
11266 		sense_len = reply_size - reply_len;
11267 		ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
11268 		VERIFY(ret == B_TRUE);
11269 	} else {
11270 		reply_len = reply_size;
11271 		sense_len = 0;
11272 	}
11273 
11274 	NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
11275 	    "snslen 0x%x",
11276 	    (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
11277 	    data_size, dataout_size, reply_len, sense_len));
11278 
11279 	/*
11280 	 * Save the command in a slot
11281 	 */
11282 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11283 		/*
11284 		 * Once the passthru command gets a slot, set
11285 		 * CFLAG_PREPARED in cmd_flags.
11286 		 */
11287 		cmd->cmd_flags |= CFLAG_PREPARED;
11288 		mptsas_start_passthru(mpt, cmd);
11289 	} else {
11290 		mptsas_waitq_add(mpt, cmd);
11291 	}
11292 
11293 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11294 		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
11295 	}
11296 
11297 	NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
11298 	    "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
11299 	    pkt->pkt_reason));
11300 
11301 	if (cmd->cmd_flags & CFLAG_PREPARED) {
11302 		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
11303 		    cmd->cmd_slot);
11304 		request_hdrp = (pMPI2RequestHeader_t)memp;
11305 	}
11306 
11307 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11308 		status = ETIMEDOUT;
11309 		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
11310 		pt_flags |= MPTSAS_CMD_TIMEOUT;
11311 		goto out;
11312 	}
11313 
11314 	if (cmd->cmd_rfm) {
11315 		/*
11316 		 * A zero cmd_rfm means the command reply is a CONTEXT
11317 		 * reply and there is no PCI write to post the free reply
11318 		 * SMFA because no reply message frame is used.
11319 		 * A non-zero cmd_rfm means the reply is an ADDRESS
11320 		 * reply and a reply message frame is used.
11321 		 */
11322 		pt_flags |= MPTSAS_ADDRESS_REPLY;
11323 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11324 		    DDI_DMA_SYNC_FORCPU);
11325 		reply_msg = (pMPI2DefaultReply_t)
11326 		    (mpt->m_reply_frame + (cmd->cmd_rfm -
11327 		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
11328 	}
11329 
11330 	mptsas_fma_check(mpt, cmd);
11331 	if (pkt->pkt_reason == CMD_TRAN_ERR) {
11332 		status = EAGAIN;
11333 		mptsas_log(mpt, CE_WARN, "passthru fma error");
11334 		goto out;
11335 	}
11336 	if (pkt->pkt_reason == CMD_RESET) {
11337 		status = EAGAIN;
11338 		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
11339 		goto out;
11340 	}
11341 
11342 	if (pkt->pkt_reason == CMD_INCOMPLETE) {
11343 		status = EIO;
11344 		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
11345 		goto out;
11346 	}
11347 
11348 	mutex_exit(&mpt->m_mutex);
11349 	if (cmd->cmd_flags & CFLAG_PREPARED) {
11350 		function = request_hdrp->Function;
11351 		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
11352 		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
11353 			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
11354 			sense_len = cmd->cmd_extrqslen ?
11355 			    min(sense_len, cmd->cmd_extrqslen) :
11356 			    min(sense_len, cmd->cmd_rqslen);
11357 		} else {
11358 			reply_len = reply_size;
11359 			sense_len = 0;
11360 		}
11361 
11362 		for (i = 0; i < reply_len; i++) {
11363 			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
11364 			    mode)) {
11365 				mutex_enter(&mpt->m_mutex);
11366 				status = EFAULT;
11367 				mptsas_log(mpt, CE_WARN, "failed to copy out "
11368 				    "reply data");
11369 				goto out;
11370 			}
11371 		}
11372 		for (i = 0; i < sense_len; i++) {
11373 			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
11374 			    reply + reply_len + i, 1, mode)) {
11375 				mutex_enter(&mpt->m_mutex);
11376 				status = EFAULT;
11377 				mptsas_log(mpt, CE_WARN, "failed to copy out "
11378 				    "sense data");
11379 				goto out;
11380 			}
11381 		}
11382 	}
11383 
11384 	if (data_size) {
11385 		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
11386 			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
11387 			    DDI_DMA_SYNC_FORCPU);
11388 			for (i = 0; i < data_size; i++) {
11389 				if (ddi_copyout((uint8_t *)(
11390 				    data_dma_state.memp + i), data + i,  1,
11391 				    mode)) {
11392 					mutex_enter(&mpt->m_mutex);
11393 					status = EFAULT;
11394 					mptsas_log(mpt, CE_WARN, "failed to "
11395 					    "copy out the reply data");
11396 					goto out;
11397 				}
11398 			}
11399 		}
11400 	}
11401 	mutex_enter(&mpt->m_mutex);
11402 out:
11403 	/*
11404 	 * Put the reply frame back on the free queue, increment the free
11405 	 * index, and write the new index to the free index register.  But only
11406 	 * if this reply is an ADDRESS reply.
11407 	 */
11408 	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
11409 		ddi_put32(mpt->m_acc_free_queue_hdl,
11410 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11411 		    cmd->cmd_rfm);
11412 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11413 		    DDI_DMA_SYNC_FORDEV);
11414 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11415 			mpt->m_free_index = 0;
11416 		}
11417 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11418 		    mpt->m_free_index);
11419 	}
11420 	if (cmd) {
11421 		if (cmd->cmd_extrqslen != 0) {
11422 			rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
11423 			    cmd->cmd_extrqsidx + 1);
11424 		}
11425 		if (cmd->cmd_flags & CFLAG_PREPARED) {
11426 			mptsas_remove_cmd(mpt, cmd);
11427 			pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11428 		}
11429 	}
11430 	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
11431 		mptsas_return_to_pool(mpt, cmd);
11432 	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
11433 		if (mptsas_check_dma_handle(data_dma_state.handle) !=
11434 		    DDI_SUCCESS) {
11435 			ddi_fm_service_impact(mpt->m_dip,
11436 			    DDI_SERVICE_UNAFFECTED);
11437 			status = EFAULT;
11438 		}
11439 		mptsas_dma_free(&data_dma_state);
11440 	}
11441 	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
11442 		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
11443 		    DDI_SUCCESS) {
11444 			ddi_fm_service_impact(mpt->m_dip,
11445 			    DDI_SERVICE_UNAFFECTED);
11446 			status = EFAULT;
11447 		}
11448 		mptsas_dma_free(&dataout_dma_state);
11449 	}
11450 	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
11451 		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11452 			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
11453 		}
11454 	}
11455 	if (request_msg)
11456 		kmem_free(request_msg, request_size);
11457 	NDBG27(("mptsas_do_passthru: Done status 0x%x", status));
11458 
11459 	return (status);
11460 }
11461 
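/*
 * Validate the ioctl passthru parameters (timeout, data sizes and
 * direction) and hand the request to mptsas_do_passthru().
 */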
11462 static int
11463 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11464 {
11465 	/*
11466 	 * If timeout is 0, set timeout to default of 60 seconds.
11467 	 */
11468 	if (data->Timeout == 0) {
11469 		data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11470 	}
11471 
11472 	if (((data->DataSize == 0) &&
11473 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11474 	    ((data->DataSize != 0) &&
11475 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11476 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11477 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11478 	    (data->DataOutSize != 0))))) {
11479 		if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11480 			data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11481 		} else {
11482 			data->DataOutSize = 0;
11483 		}
11484 		/*
11485 		 * Send passthru request messages
11486 		 */
11487 		return (mptsas_do_passthru(mpt,
11488 		    (uint8_t *)((uintptr_t)data->PtrRequest),
11489 		    (uint8_t *)((uintptr_t)data->PtrReply),
11490 		    (uint8_t *)((uintptr_t)data->PtrData),
11491 		    data->RequestSize, data->ReplySize,
11492 		    data->DataSize, data->DataDirection,
11493 		    (uint8_t *)((uintptr_t)data->PtrDataOut),
11494 		    data->DataOutSize, data->Timeout, mode));
11495 	} else {
11496 		return (EINVAL);
11497 	}
11498 }
11499 
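/*
 * Return the index of the FW diagnostic buffer registered with the given
 * unique ID, or MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND if no buffer matches.
 */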
11500 static uint8_t
11501 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11502 {
11503 	uint8_t	index;
11504 
11505 	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11506 		if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11507 			return (index);
11508 		}
11509 	}
11510 
11511 	return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11512 }
11513 
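/*
 * Build a DIAG_BUFFER_POST or DIAG_RELEASE request in the command's
 * request frame, depending on the diag function, and hand it to the IOC.
 */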
11514 static void
11515 mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
11516 {
11517 	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
11518 	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
11519 	struct scsi_pkt			*pkt = cmd->cmd_pkt;
11520 	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
11521 	uint32_t			i;
11522 	uint64_t			request_desc;
11523 
11524 	ASSERT(mutex_owned(&mpt->m_mutex));
11525 
11526 	/*
11527 	 * Form the diag message depending on the post or release function.
11528 	 */
11529 	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
11530 		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
11531 		    (mpt->m_req_frame + (mpt->m_req_frame_size *
11532 		    cmd->cmd_slot));
11533 		bzero(pDiag_post_msg, mpt->m_req_frame_size);
11534 		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
11535 		    diag->function);
11536 		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
11537 		    diag->pBuffer->buffer_type);
11538 		ddi_put8(mpt->m_acc_req_frame_hdl,
11539 		    &pDiag_post_msg->ExtendedType,
11540 		    diag->pBuffer->extended_type);
11541 		ddi_put32(mpt->m_acc_req_frame_hdl,
11542 		    &pDiag_post_msg->BufferLength,
11543 		    diag->pBuffer->buffer_data.size);
11544 		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
11545 		    i++) {
11546 			ddi_put32(mpt->m_acc_req_frame_hdl,
11547 			    &pDiag_post_msg->ProductSpecific[i],
11548 			    diag->pBuffer->product_specific[i]);
11549 		}
11550 		ddi_put32(mpt->m_acc_req_frame_hdl,
11551 		    &pDiag_post_msg->BufferAddress.Low,
11552 		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
11553 		    & 0xffffffffull));
11554 		ddi_put32(mpt->m_acc_req_frame_hdl,
11555 		    &pDiag_post_msg->BufferAddress.High,
11556 		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
11557 		    >> 32));
11558 	} else {
11559 		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
11560 		    (mpt->m_req_frame + (mpt->m_req_frame_size *
11561 		    cmd->cmd_slot));
11562 		bzero(pDiag_release_msg, mpt->m_req_frame_size);
11563 		ddi_put8(mpt->m_acc_req_frame_hdl,
11564 		    &pDiag_release_msg->Function, diag->function);
11565 		ddi_put8(mpt->m_acc_req_frame_hdl,
11566 		    &pDiag_release_msg->BufferType,
11567 		    diag->pBuffer->buffer_type);
11568 	}
11569 
11570 	/*
11571 	 * Send the message
11572 	 */
11573 	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
11574 	    DDI_DMA_SYNC_FORDEV);
11575 	request_desc = (cmd->cmd_slot << 16) +
11576 	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
11577 	cmd->cmd_rfm = 0;
11578 	MPTSAS_START_CMD(mpt, request_desc);
11579 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
11580 	    DDI_SUCCESS) ||
11581 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
11582 	    DDI_SUCCESS)) {
11583 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11584 	}
11585 }
11586 
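/*
 * Post a FW diagnostic buffer to the IOC and, on success, mark it as
 * owned by firmware.
 */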
11587 static int
11588 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
11589     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
11590 {
11591 	mptsas_diag_request_t		diag;
11592 	int				status, slot_num, post_flags = 0;
11593 	mptsas_cmd_t			*cmd = NULL;
11594 	struct scsi_pkt			*pkt;
11595 	pMpi2DiagBufferPostReply_t	reply;
11596 	uint16_t			iocstatus;
11597 	uint32_t			iocloginfo, transfer_length;
11598 
11599 	/*
11600 	 * If buffer is not enabled, just leave.
11601 	 */
11602 	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
11603 	if (!pBuffer->enabled) {
11604 		status = DDI_FAILURE;
11605 		goto out;
11606 	}
11607 
11608 	/*
11609 	 * Clear some flags initially.
11610 	 */
11611 	pBuffer->force_release = FALSE;
11612 	pBuffer->valid_data = FALSE;
11613 	pBuffer->owned_by_firmware = FALSE;
11614 
11615 	/*
11616 	 * Get a cmd buffer from the cmd buffer pool
11617 	 */
11618 	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11619 		status = DDI_FAILURE;
11620 		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
11621 		goto out;
11622 	}
11623 	post_flags |= MPTSAS_REQUEST_POOL_CMD;
11624 
11625 	bzero((caddr_t)cmd, sizeof (*cmd));
11626 	bzero((caddr_t)pkt, scsi_pkt_size());
11627 
11628 	cmd->ioc_cmd_slot = (uint32_t)(slot_num);
11629 
11630 	diag.pBuffer = pBuffer;
11631 	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
11632 
11633 	/*
11634 	 * Form a blank cmd/pkt to carry the diag request
11635 	 */
11636 	pkt->pkt_ha_private	= (opaque_t)&diag;
11637 	pkt->pkt_flags		= FLAG_HEAD;
11638 	pkt->pkt_time		= 60;
11639 	cmd->cmd_pkt		= pkt;
11640 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;
11641 
11642 	/*
11643 	 * Save the command in a slot
11644 	 */
11645 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11646 		/*
11647 		 * Once the diag command gets a slot, set
11648 		 * CFLAG_PREPARED in cmd_flags.
11649 		 */
11650 		cmd->cmd_flags |= CFLAG_PREPARED;
11651 		mptsas_start_diag(mpt, cmd);
11652 	} else {
11653 		mptsas_waitq_add(mpt, cmd);
11654 	}
11655 
11656 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11657 		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
11658 	}
11659 
11660 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11661 		status = DDI_FAILURE;
11662 		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
11663 		goto out;
11664 	}
11665 
11666 	/*
11667 	 * cmd_rfm points to the reply message if a reply was given.  Check the
11668 	 * IOCStatus to make sure everything went OK with the FW diag request
11669 	 * and set buffer flags.
11670 	 */
11671 	if (cmd->cmd_rfm) {
11672 		post_flags |= MPTSAS_ADDRESS_REPLY;
11673 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11674 		    DDI_DMA_SYNC_FORCPU);
11675 		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
11676 		    (cmd->cmd_rfm -
11677 		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
11678 
11679 		/*
11680 		 * Get the reply message data
11681 		 */
11682 		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
11683 		    &reply->IOCStatus);
11684 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
11685 		    &reply->IOCLogInfo);
11686 		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
11687 		    &reply->TransferLength);
11688 
11689 		/*
11690 		 * If the post failed, quit.
11691 		 */
11692 		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
11693 			status = DDI_FAILURE;
11694 			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
11695 			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
11696 			    iocloginfo, transfer_length));
11697 			goto out;
11698 		}
11699 
11700 		/*
11701 		 * Post was successful.
11702 		 */
11703 		pBuffer->valid_data = TRUE;
11704 		pBuffer->owned_by_firmware = TRUE;
11705 		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11706 		status = DDI_SUCCESS;
11707 	}
11708 
11709 out:
11710 	/*
11711 	 * Put the reply frame back on the free queue, increment the free
11712 	 * index, and write the new index to the free index register.  But only
11713 	 * if this reply is an ADDRESS reply.
11714 	 */
11715 	if (post_flags & MPTSAS_ADDRESS_REPLY) {
11716 		ddi_put32(mpt->m_acc_free_queue_hdl,
11717 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11718 		    cmd->cmd_rfm);
11719 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11720 		    DDI_DMA_SYNC_FORDEV);
11721 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11722 			mpt->m_free_index = 0;
11723 		}
11724 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11725 		    mpt->m_free_index);
11726 	}
11727 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11728 		mptsas_remove_cmd(mpt, cmd);
11729 		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11730 	}
11731 	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
11732 		mptsas_return_to_pool(mpt, cmd);
11733 	}
11734 
11735 	return (status);
11736 }
11737 
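/*
 * Ask the IOC to release a FW diagnostic buffer so the host owns it
 * again; for an UNREGISTER request the buffer's unique ID is also
 * cleared.
 */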
11738 static int
11739 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
11740     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
11741     uint32_t diag_type)
11742 {
11743 	mptsas_diag_request_t	diag;
11744 	int			status, slot_num, rel_flags = 0;
11745 	mptsas_cmd_t		*cmd = NULL;
11746 	struct scsi_pkt		*pkt;
11747 	pMpi2DiagReleaseReply_t	reply;
11748 	uint16_t		iocstatus;
11749 	uint32_t		iocloginfo;
11750 
11751 	/*
11752 	 * If buffer is not enabled, just leave.
11753 	 */
11754 	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
11755 	if (!pBuffer->enabled) {
11756 		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
11757 		    "by the IOC");
11758 		status = DDI_FAILURE;
11759 		goto out;
11760 	}
11761 
11762 	/*
11763 	 * Clear some flags initially.
11764 	 */
11765 	pBuffer->force_release = FALSE;
11766 	pBuffer->valid_data = FALSE;
11767 	pBuffer->owned_by_firmware = FALSE;
11768 
11769 	/*
11770 	 * Get a cmd buffer from the cmd buffer pool
11771 	 */
11772 	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11773 		status = DDI_FAILURE;
11774 		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
11775 		    "Diag");
11776 		goto out;
11777 	}
11778 	rel_flags |= MPTSAS_REQUEST_POOL_CMD;
11779 
11780 	bzero((caddr_t)cmd, sizeof (*cmd));
11781 	bzero((caddr_t)pkt, scsi_pkt_size());
11782 
11783 	cmd->ioc_cmd_slot = (uint32_t)(slot_num);
11784 
11785 	diag.pBuffer = pBuffer;
11786 	diag.function = MPI2_FUNCTION_DIAG_RELEASE;
11787 
11788 	/*
11789 	 * Form a blank cmd/pkt to carry the diag request
11790 	 */
11791 	pkt->pkt_ha_private	= (opaque_t)&diag;
11792 	pkt->pkt_flags		= FLAG_HEAD;
11793 	pkt->pkt_time		= 60;
11794 	cmd->cmd_pkt		= pkt;
11795 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;
11796 
11797 	/*
11798 	 * Save the command in a slot
11799 	 */
11800 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11801 		/*
11802 		 * Once the diag command gets a slot, set
11803 		 * CFLAG_PREPARED in cmd_flags.
11804 		 */
11805 		cmd->cmd_flags |= CFLAG_PREPARED;
11806 		mptsas_start_diag(mpt, cmd);
11807 	} else {
11808 		mptsas_waitq_add(mpt, cmd);
11809 	}
11810 
11811 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11812 		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
11813 	}
11814 
11815 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11816 		status = DDI_FAILURE;
11817 		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
11818 		goto out;
11819 	}
11820 
11821 	/*
11822 	 * cmd_rfm points to the reply message if a reply was given.  Check the
11823 	 * IOCStatus to make sure everything went OK with the FW diag request
11824 	 * and set buffer flags.
11825 	 */
11826 	if (cmd->cmd_rfm) {
11827 		rel_flags |= MPTSAS_ADDRESS_REPLY;
11828 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11829 		    DDI_DMA_SYNC_FORCPU);
11830 		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
11831 		    (cmd->cmd_rfm -
11832 		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
11833 
11834 		/*
11835 		 * Get the reply message data
11836 		 */
11837 		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
11838 		    &reply->IOCStatus);
11839 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
11840 		    &reply->IOCLogInfo);
11841 
11842 		/*
11843 		 * If the release failed, quit.
11844 		 */
11845 		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
11846 		    pBuffer->owned_by_firmware) {
11847 			status = DDI_FAILURE;
11848 			NDBG13(("release FW Diag Buffer failed: "
11849 			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
11850 			    iocloginfo));
11851 			goto out;
11852 		}
11853 
11854 		/*
11855 		 * Release was successful.
11856 		 */
11857 		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11858 		status = DDI_SUCCESS;
11859 
11860 		/*
11861 		 * If this was for an UNREGISTER diag type command, clear the
11862 		 * unique ID.
11863 		 */
11864 		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
11865 			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11866 		}
11867 	}
11868 
11869 out:
11870 	/*
11871 	 * Put the reply frame back on the free queue, increment the free
11872 	 * index, and write the new index to the free index register.  But only
11873 	 * if this reply is an ADDRESS reply.
11874 	 */
11875 	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
11876 		ddi_put32(mpt->m_acc_free_queue_hdl,
11877 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11878 		    cmd->cmd_rfm);
11879 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11880 		    DDI_DMA_SYNC_FORDEV);
11881 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11882 			mpt->m_free_index = 0;
11883 		}
11884 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11885 		    mpt->m_free_index);
11886 	}
11887 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11888 		mptsas_remove_cmd(mpt, cmd);
11889 		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11890 	}
11891 	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
11892 		mptsas_return_to_pool(mpt, cmd);
11893 	}
11894 
11895 	return (status);
11896 }
11897 
11898 static int
11899 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
11900     uint32_t *return_code)
11901 {
11902 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
11903 	uint8_t				extended_type, buffer_type, i;
11904 	uint32_t			buffer_size;
11905 	uint32_t			unique_id;
11906 	int				status;
11907 
11908 	ASSERT(mutex_owned(&mpt->m_mutex));
11909 
11910 	extended_type = diag_register->ExtendedType;
11911 	buffer_type = diag_register->BufferType;
11912 	buffer_size = diag_register->RequestedBufferSize;
11913 	unique_id = diag_register->UniqueId;
11914 
11915 	/*
11916 	 * Check for valid buffer type
11917 	 */
11918 	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
11919 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11920 		return (DDI_FAILURE);
11921 	}
11922 
11923 	/*
11924 	 * Get the current buffer and look up the unique ID.  The unique ID
11925 	 * should not be found.  If it is, the ID is already in use.
11926 	 */
11927 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11928 	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
11929 	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11930 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11931 		return (DDI_FAILURE);
11932 	}
11933 
11934 	/*
11935 	 * The buffer's unique ID should not be registered yet, and the given
11936 	 * unique ID cannot be 0.
11937 	 */
11938 	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
11939 	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11940 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11941 		return (DDI_FAILURE);
11942 	}
11943 
11944 	/*
11945 	 * If this buffer is already posted as immediate, just change owner.
11946 	 */
11947 	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
11948 	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11949 		pBuffer->immediate = FALSE;
11950 		pBuffer->unique_id = unique_id;
11951 		return (DDI_SUCCESS);
11952 	}
11953 
11954 	/*
11955 	 * Post a new buffer after checking if it's enabled.  The DMA buffer
11956 	 * that is allocated will be contiguous (sgl_len = 1).
11957 	 */
11958 	if (!pBuffer->enabled) {
11959 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
11960 		return (DDI_FAILURE);
11961 	}
11962 	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
11963 	pBuffer->buffer_data.size = buffer_size;
11964 	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
11965 		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
11966 		    "diag buffer: size = %d bytes", buffer_size);
11967 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
11968 		return (DDI_FAILURE);
11969 	}
11970 
11971 	/*
11972 	 * Copy the given info to the diag buffer and post the buffer.
11973 	 */
11974 	pBuffer->buffer_type = buffer_type;
11975 	pBuffer->immediate = FALSE;
11976 	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
11977 		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
11978 		    i++) {
11979 			pBuffer->product_specific[i] =
11980 			    diag_register->ProductSpecific[i];
11981 		}
11982 	}
11983 	pBuffer->extended_type = extended_type;
11984 	pBuffer->unique_id = unique_id;
11985 	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
11986 
11987 	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11988 	    DDI_SUCCESS) {
11989 		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
11990 		    "mptsas_diag_register.");
11991 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11992 		status = DDI_FAILURE;
11993 	}
11994 
11995 	/*
11996 	 * In case there was a failure, free the DMA buffer.
11997 	 */
11998 	if (status == DDI_FAILURE) {
11999 		mptsas_dma_free(&pBuffer->buffer_data);
12000 	}
12001 
12002 	return (status);
12003 }
12004 
12005 static int
12006 mptsas_diag_unregister(mptsas_t *mpt,
12007     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
12008 {
12009 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12010 	uint8_t				i;
12011 	uint32_t			unique_id;
12012 	int				status;
12013 
12014 	ASSERT(mutex_owned(&mpt->m_mutex));
12015 
12016 	unique_id = diag_unregister->UniqueId;
12017 
12018 	/*
12019 	 * Get the current buffer and look up the unique ID.  The unique ID
12020 	 * should be there.
12021 	 */
12022 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12023 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12024 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12025 		return (DDI_FAILURE);
12026 	}
12027 
12028 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12029 
12030 	/*
12031 	 * Try to release the buffer from FW before freeing it.  If release
12032 	 * fails, don't free the DMA buffer in case FW tries to access it
12033 	 * later.  A buffer not owned by the firmware needs no release.
12034 	 */
12035 	if (!pBuffer->owned_by_firmware) {
12036 		status = DDI_SUCCESS;
12037 	} else {
12038 		status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
12039 		    return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
12040 	}
12041 
12042 	/*
12043 	 * At this point, return the current status no matter what happens with
12044 	 * the DMA buffer.
12045 	 */
12046 	pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
12047 	if (status == DDI_SUCCESS) {
12048 		if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
12049 		    DDI_SUCCESS) {
12050 			mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
12051 			    "in mptsas_diag_unregister.");
12052 			ddi_fm_service_impact(mpt->m_dip,
12053 			    DDI_SERVICE_UNAFFECTED);
12054 		}
12055 		mptsas_dma_free(&pBuffer->buffer_data);
12056 	}
12057 
12058 	return (status);
12059 }
12060 
12061 static int
12062 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
12063     uint32_t *return_code)
12064 {
12065 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12066 	uint8_t				i;
12067 	uint32_t			unique_id;
12068 
12069 	ASSERT(mutex_owned(&mpt->m_mutex));
12070 
12071 	unique_id = diag_query->UniqueId;
12072 
12073 	/*
12074 	 * If ID is valid, query on ID.
12075 	 * If ID is invalid, query on buffer type.
12076 	 */
12077 	if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
12078 		i = diag_query->BufferType;
12079 		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
12080 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12081 			return (DDI_FAILURE);
12082 		}
12083 	} else {
12084 		i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12085 		if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12086 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12087 			return (DDI_FAILURE);
12088 		}
12089 	}
12090 
12091 	/*
12092 	 * Fill query structure with the diag buffer info.
12093 	 */
12094 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12095 	diag_query->BufferType = pBuffer->buffer_type;
12096 	diag_query->ExtendedType = pBuffer->extended_type;
12097 	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
12098 		for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
12099 		    i++) {
12100 			diag_query->ProductSpecific[i] =
12101 			    pBuffer->product_specific[i];
12102 		}
12103 	}
12104 	diag_query->TotalBufferSize = pBuffer->buffer_data.size;
12105 	diag_query->DriverAddedBufferSize = 0;
12106 	diag_query->UniqueId = pBuffer->unique_id;
12107 	diag_query->ApplicationFlags = 0;
12108 	diag_query->DiagnosticFlags = 0;
12109 
12110 	/*
12111 	 * Set/Clear application flags
12112 	 */
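	/*
	 * For example, a registered (non-immediate) buffer still owned by
	 * the firmware reports APP_OWNED, BUFFER_VALID and FW_BUFFER_ACCESS;
	 * after it is released, only APP_OWNED remains (plus BUFFER_VALID
	 * when valid_data is set).
	 */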
12113 	if (pBuffer->immediate) {
12114 		diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12115 	} else {
12116 		diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12117 	}
12118 	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
12119 		diag_query->ApplicationFlags |=
12120 		    MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12121 	} else {
12122 		diag_query->ApplicationFlags &=
12123 		    ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12124 	}
12125 	if (pBuffer->owned_by_firmware) {
12126 		diag_query->ApplicationFlags |=
12127 		    MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12128 	} else {
12129 		diag_query->ApplicationFlags &=
12130 		    ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12131 	}
12132 
12133 	return (DDI_SUCCESS);
12134 }
12135 
12136 static int
12137 mptsas_diag_read_buffer(mptsas_t *mpt,
12138     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
12139     uint32_t *return_code, int ioctl_mode)
12140 {
12141 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12142 	uint8_t				i, *pData;
12143 	uint32_t			unique_id, byte;
12144 	int				status;
12145 
12146 	ASSERT(mutex_owned(&mpt->m_mutex));
12147 
12148 	unique_id = diag_read_buffer->UniqueId;
12149 
12150 	/*
12151 	 * Get the current buffer and look up the unique ID.  The unique ID
12152 	 * should be there.
12153 	 */
12154 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12155 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12156 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12157 		return (DDI_FAILURE);
12158 	}
12159 
12160 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12161 
12162 	/*
12163 	 * Make sure requested read is within limits
12164 	 */
12165 	if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
12166 	    pBuffer->buffer_data.size) {
12167 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12168 		return (DDI_FAILURE);
12169 	}
12170 
12171 	/*
12172 	 * Copy the requested data from DMA to the diag_read_buffer.  The DMA
12173 	 * buffer that was allocated is one contiguous buffer.
12174 	 */
12175 	pData = (uint8_t *)(pBuffer->buffer_data.memp +
12176 	    diag_read_buffer->StartingOffset);
12177 	(void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
12178 	    DDI_DMA_SYNC_FORCPU);
12179 	for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
12180 		if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
12181 		    != 0) {
12182 			return (DDI_FAILURE);
12183 		}
12184 	}
12185 	diag_read_buffer->Status = 0;
12186 
12187 	/*
12188 	 * Set or clear the Force Release flag.
12189 	 */
12190 	if (pBuffer->force_release) {
12191 		diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12192 	} else {
12193 		diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12194 	}
12195 
12196 	/*
12197 	 * If buffer is to be reregistered, make sure it's not already owned by
12198 	 * firmware first.
12199 	 */
12200 	status = DDI_SUCCESS;
12201 	if (!pBuffer->owned_by_firmware) {
12202 		if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
12203 			status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
12204 			    return_code);
12205 		}
12206 	}
12207 
12208 	return (status);
12209 }
12210 
12211 static int
12212 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
12213     uint32_t *return_code)
12214 {
12215 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12216 	uint8_t				i;
12217 	uint32_t			unique_id;
12218 	int				status;
12219 
12220 	ASSERT(mutex_owned(&mpt->m_mutex));
12221 
12222 	unique_id = diag_release->UniqueId;
12223 
12224 	/*
12225 	 * Get the current buffer and look up the unique ID.  The unique ID
12226 	 * should be there.
12227 	 */
12228 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12229 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12230 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12231 		return (DDI_FAILURE);
12232 	}
12233 
12234 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12235 
12236 	/*
12237 	 * If buffer is not owned by firmware, it's already been released.
12238 	 */
12239 	if (!pBuffer->owned_by_firmware) {
12240 		*return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
12241 		return (DDI_FAILURE);
12242 	}
12243 
12244 	/*
12245 	 * Release the buffer.
12246 	 */
12247 	status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
12248 	    MPTSAS_FW_DIAG_TYPE_RELEASE);
12249 	return (status);
12250 }
12251 
12252 static int
12253 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
12254     uint32_t length, uint32_t *return_code, int ioctl_mode)
12255 {
12256 	mptsas_fw_diag_register_t	diag_register;
12257 	mptsas_fw_diag_unregister_t	diag_unregister;
12258 	mptsas_fw_diag_query_t		diag_query;
12259 	mptsas_diag_read_buffer_t	diag_read_buffer;
12260 	mptsas_fw_diag_release_t	diag_release;
12261 	int				status = DDI_SUCCESS;
12262 	uint32_t			original_return_code, read_buf_len;
12263 
12264 	ASSERT(mutex_owned(&mpt->m_mutex));
12265 
12266 	original_return_code = *return_code;
12267 	*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
12268 
12269 	switch (action) {
12270 		case MPTSAS_FW_DIAG_TYPE_REGISTER:
12271 			if (!length) {
12272 				*return_code =
12273 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12274 				status = DDI_FAILURE;
12275 				break;
12276 			}
12277 			if (ddi_copyin(diag_action, &diag_register,
12278 			    sizeof (diag_register), ioctl_mode) != 0) {
12279 				return (DDI_FAILURE);
12280 			}
12281 			status = mptsas_diag_register(mpt, &diag_register,
12282 			    return_code);
12283 			break;
12284 
12285 		case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
12286 			if (length < sizeof (diag_unregister)) {
12287 				*return_code =
12288 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12289 				status = DDI_FAILURE;
12290 				break;
12291 			}
12292 			if (ddi_copyin(diag_action, &diag_unregister,
12293 			    sizeof (diag_unregister), ioctl_mode) != 0) {
12294 				return (DDI_FAILURE);
12295 			}
12296 			status = mptsas_diag_unregister(mpt, &diag_unregister,
12297 			    return_code);
12298 			break;
12299 
12300 		case MPTSAS_FW_DIAG_TYPE_QUERY:
12301 			if (length < sizeof (diag_query)) {
12302 				*return_code =
12303 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12304 				status = DDI_FAILURE;
12305 				break;
12306 			}
12307 			if (ddi_copyin(diag_action, &diag_query,
12308 			    sizeof (diag_query), ioctl_mode) != 0) {
12309 				return (DDI_FAILURE);
12310 			}
12311 			status = mptsas_diag_query(mpt, &diag_query,
12312 			    return_code);
12313 			if (status == DDI_SUCCESS) {
12314 				if (ddi_copyout(&diag_query, diag_action,
12315 				    sizeof (diag_query), ioctl_mode) != 0) {
12316 					return (DDI_FAILURE);
12317 				}
12318 			}
12319 			break;
12320 
12321 		case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
12322 			if (ddi_copyin(diag_action, &diag_read_buffer,
12323 			    sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
12324 				return (DDI_FAILURE);
12325 			}
12326 			read_buf_len = sizeof (diag_read_buffer) -
12327 			    sizeof (diag_read_buffer.DataBuffer) +
12328 			    diag_read_buffer.BytesToRead;
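			/*
			 * The "- 4" here and in the copyin/copyout calls
			 * skips the trailing DataBuffer placeholder at the
			 * end of the structure (presumably a single 32-bit
			 * element), so the fixed-size header is transferred
			 * separately from the variable-length data that
			 * follows it in the user's buffer.
			 */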
12329 			if (length < read_buf_len) {
12330 				*return_code =
12331 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12332 				status = DDI_FAILURE;
12333 				break;
12334 			}
12335 			status = mptsas_diag_read_buffer(mpt,
12336 			    &diag_read_buffer, diag_action +
12337 			    sizeof (diag_read_buffer) - 4, return_code,
12338 			    ioctl_mode);
12339 			if (status == DDI_SUCCESS) {
12340 				if (ddi_copyout(&diag_read_buffer, diag_action,
12341 				    sizeof (diag_read_buffer) - 4, ioctl_mode)
12342 				    != 0) {
12343 					return (DDI_FAILURE);
12344 				}
12345 			}
12346 			break;
12347 
12348 		case MPTSAS_FW_DIAG_TYPE_RELEASE:
12349 			if (length < sizeof (diag_release)) {
12350 				*return_code =
12351 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12352 				status = DDI_FAILURE;
12353 				break;
12354 			}
12355 			if (ddi_copyin(diag_action, &diag_release,
12356 			    sizeof (diag_release), ioctl_mode) != 0) {
12357 				return (DDI_FAILURE);
12358 			}
12359 			status = mptsas_diag_release(mpt, &diag_release,
12360 			    return_code);
12361 			break;
12362 
12363 		default:
12364 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12365 			status = DDI_FAILURE;
12366 			break;
12367 	}
12368 
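	/*
	 * For callers using the MPTSAS_FW_DIAG_NEW interface, an error that
	 * a diag routine has already reported through *return_code is not
	 * also reported as an ioctl failure; the conversion below lets such
	 * callers see DDI_SUCCESS and inspect ReturnCode instead.  Copyin
	 * and copyout failures above still fail the ioctl outright.
	 */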
12369 	if ((status == DDI_FAILURE) &&
12370 	    (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12371 	    (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12372 		status = DDI_SUCCESS;
12373 	}
12374 
12375 	return (status);
12376 }
12377 
12378 static int
12379 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12380 {
12381 	int			status;
12382 	mptsas_diag_action_t	driver_data;
12383 
12384 	ASSERT(mutex_owned(&mpt->m_mutex));
12385 
12386 	/*
12387 	 * Copy the user data to a driver data buffer.
12388 	 */
12389 	if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12390 	    mode) == 0) {
12391 		/*
12392 		 * Send diag action request if Action is valid
12393 		 */
12394 		if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12395 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12396 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12397 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12398 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12399 			status = mptsas_do_diag_action(mpt, driver_data.Action,
12400 			    (void *)(uintptr_t)driver_data.PtrDiagAction,
12401 			    driver_data.Length, &driver_data.ReturnCode,
12402 			    mode);
12403 			if (status == DDI_SUCCESS) {
12404 				if (ddi_copyout(&driver_data.ReturnCode,
12405 				    &user_data->ReturnCode,
12406 				    sizeof (user_data->ReturnCode), mode)
12407 				    != 0) {
12408 					status = EFAULT;
12409 				} else {
12410 					status = 0;
12411 				}
12412 			} else {
12413 				status = EIO;
12414 			}
12415 		} else {
12416 			status = EINVAL;
12417 		}
12418 	} else {
12419 		status = EFAULT;
12420 	}
12421 
12422 	return (status);
12423 }
12424 
12425 /*
12426  * This routine handles the "event query" ioctl.
12427  */
12428 static int
12429 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12430     int *rval)
12431 {
12432 	int			status;
12433 	mptsas_event_query_t	driverdata;
12434 	uint8_t			i;
12435 
12436 	driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12437 
12438 	mutex_enter(&mpt->m_mutex);
12439 	for (i = 0; i < 4; i++) {
12440 		driverdata.Types[i] = mpt->m_event_mask[i];
12441 	}
12442 	mutex_exit(&mpt->m_mutex);
12443 
12444 	if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12445 		status = EFAULT;
12446 	} else {
12447 		*rval = MPTIOCTL_STATUS_GOOD;
12448 		status = 0;
12449 	}
12450 
12451 	return (status);
12452 }
12453 
12454 /*
12455  * This routine handles the "event enable" ioctl.
12456  */
12457 static int
12458 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12459     int *rval)
12460 {
12461 	int			status;
12462 	mptsas_event_enable_t	driverdata;
12463 	uint8_t			i;
12464 
12465 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12466 		mutex_enter(&mpt->m_mutex);
12467 		for (i = 0; i < 4; i++) {
12468 			mpt->m_event_mask[i] = driverdata.Types[i];
12469 		}
12470 		mutex_exit(&mpt->m_mutex);
12471 
12472 		*rval = MPTIOCTL_STATUS_GOOD;
12473 		status = 0;
12474 	} else {
12475 		status = EFAULT;
12476 	}
12477 	return (status);
12478 }
12479 
12480 /*
12481  * This routine handles the "event report" ioctl.
12482  */
12483 static int
12484 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
12485     int *rval)
12486 {
12487 	int			status;
12488 	mptsas_event_report_t	driverdata;
12489 
12490 	mutex_enter(&mpt->m_mutex);
12491 
12492 	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
12493 	    mode) == 0) {
12494 		if (driverdata.Size >= sizeof (mpt->m_events)) {
12495 			if (ddi_copyout(mpt->m_events, data->Events,
12496 			    sizeof (mpt->m_events), mode) != 0) {
12497 				status = EFAULT;
12498 			} else {
12499 				if (driverdata.Size > sizeof (mpt->m_events)) {
12500 					driverdata.Size =
12501 					    sizeof (mpt->m_events);
12502 					if (ddi_copyout(&driverdata.Size,
12503 					    &data->Size,
12504 					    sizeof (driverdata.Size),
12505 					    mode) != 0) {
12506 						status = EFAULT;
12507 					} else {
12508 						*rval = MPTIOCTL_STATUS_GOOD;
12509 						status = 0;
12510 					}
12511 				} else {
12512 					*rval = MPTIOCTL_STATUS_GOOD;
12513 					status = 0;
12514 				}
12515 			}
12516 		} else {
12517 			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12518 			status = 0;
12519 		}
12520 	} else {
12521 		status = EFAULT;
12522 	}
12523 
12524 	mutex_exit(&mpt->m_mutex);
12525 	return (status);
12526 }
12527 
12528 static void
12529 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12530 {
12531 	int	*reg_data;
12532 	uint_t	reglen;
12533 
12534 	/*
12535 	 * Look up the 'reg' property and extract the PCI data from it.
12536 	 */
12537 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12538 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
12539 	    DDI_PROP_SUCCESS) {
12540 		/*
12541 		 * Extract the PCI data from the 'reg' property first DWORD.
12542 		 * The entry looks like the following:
12543 		 * First DWORD:
12544 		 * Bits 0 - 7 8-bit Register number
12545 		 * Bits 8 - 10 3-bit Function number
12546 		 * Bits 11 - 15 5-bit Device number
12547 		 * Bits 16 - 23 8-bit Bus number
12548 		 * Bits 24 - 25 2-bit Address Space type identifier
12549 		 *
12550 		 */
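		/*
		 * For example, reg_data[0] == 0x00021100 decodes to bus 2,
		 * device 2, function 1.
		 */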
12551 		adapter_data->PciInformation.u.bits.BusNumber =
12552 		    (reg_data[0] & 0x00FF0000) >> 16;
12553 		adapter_data->PciInformation.u.bits.DeviceNumber =
12554 		    (reg_data[0] & 0x0000F800) >> 11;
12555 		adapter_data->PciInformation.u.bits.FunctionNumber =
12556 		    (reg_data[0] & 0x00000700) >> 8;
12557 		ddi_prop_free((void *)reg_data);
12558 	} else {
12559 		/*
12560 		 * If we can't determine the PCI data then we fill in FF's for
12561 		 * the data to indicate this.
12562 		 */
12563 		adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12564 		adapter_data->MpiPortNumber = 0xFFFFFFFF;
12565 		adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12566 	}
12567 
12568 	/*
12569 	 * Saved in the mpt->m_fwversion
12570 	 */
12571 	adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12572 }
12573 
12574 static void
12575 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12576 {
12577 	char	*driver_verstr = MPTSAS_MOD_STRING;
12578 
12579 	mptsas_lookup_pci_data(mpt, adapter_data);
12580 	adapter_data->AdapterType = mpt->m_MPI25 ?
12581 	    MPTIOCTL_ADAPTER_TYPE_SAS3 :
12582 	    MPTIOCTL_ADAPTER_TYPE_SAS2;
12583 	adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12584 	adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12585 	adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12586 	adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12587 	(void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12588 	adapter_data->BiosVersion = 0;
12589 	(void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12590 }
12591 
12592 static void
12593 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12594 {
12595 	int	*reg_data, i;
12596 	uint_t	reglen;
12597 
12598 	/*
12599 	 * Look up the 'reg' property and extract the PCI data from it.
12600 	 */
12601 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12602 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
12603 	    DDI_PROP_SUCCESS) {
12604 		/*
12605 		 * Extract the PCI data from the 'reg' property first DWORD.
12606 		 * The entry looks like the following:
12607 		 * First DWORD:
12608 		 * Bits 8 - 10 3-bit Function number
12609 		 * Bits 11 - 15 5-bit Device number
12610 		 * Bits 16 - 23 8-bit Bus number
12611 		 */
12612 		pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12613 		pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12614 		pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12615 		ddi_prop_free((void *)reg_data);
12616 	} else {
12617 		/*
12618 		 * If we can't determine the PCI info then we fill in FF's for
12619 		 * the data to indicate this.
12620 		 */
12621 		pci_info->BusNumber = 0xFFFFFFFF;
12622 		pci_info->DeviceNumber = 0xFF;
12623 		pci_info->FunctionNumber = 0xFF;
12624 	}
12625 
12626 	/*
12627 	 * Now get the interrupt vector and the pci header.  The vector can
12628 	 * only be 0 right now.  The header is the first 256 bytes of config
12629 	 * space.
12630 	 */
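	/*
	 * The copied header follows the standard PCI configuration layout,
	 * e.g. PciHeader[0..1] holds the vendor ID and PciHeader[2..3] the
	 * device ID, both little-endian.
	 */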
12631 	pci_info->InterruptVector = 0;
12632 	for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12633 		pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12634 		    i);
12635 	}
12636 }
12637 
12638 static int
12639 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
12640 {
12641 	int			status = 0;
12642 	mptsas_reg_access_t	driverdata;
12643 
12644 	mutex_enter(&mpt->m_mutex);
12645 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12646 		switch (driverdata.Command) {
12647 			/*
12648 			 * IO access is not supported.
12649 			 */
12650 			case REG_IO_READ:
12651 			case REG_IO_WRITE:
12652 				mptsas_log(mpt, CE_WARN, "IO access is not "
12653 				    "supported.  Use memory access.");
12654 				status = EINVAL;
12655 				break;
12656 
12657 			case REG_MEM_READ:
12658 				driverdata.RegData = ddi_get32(mpt->m_datap,
12659 				    (uint32_t *)(void *)mpt->m_reg +
12660 				    driverdata.RegOffset);
12661 				if (ddi_copyout(&driverdata.RegData,
12662 				    &data->RegData,
12663 				    sizeof (driverdata.RegData), mode) != 0) {
12664 					mptsas_log(mpt, CE_WARN, "Register "
12665 					    "Read Failed");
12666 					status = EFAULT;
12667 				}
12668 				break;
12669 
12670 			case REG_MEM_WRITE:
12671 				ddi_put32(mpt->m_datap,
12672 				    (uint32_t *)(void *)mpt->m_reg +
12673 				    driverdata.RegOffset,
12674 				    driverdata.RegData);
12675 				break;
12676 
12677 			default:
12678 				status = EINVAL;
12679 				break;
12680 		}
12681 	} else {
12682 		status = EFAULT;
12683 	}
12684 
12685 	mutex_exit(&mpt->m_mutex);
12686 	return (status);
12687 }
12688 
12689 static int
12690 led_control(mptsas_t *mpt, intptr_t data, int mode)
12691 {
12692 	int ret = 0;
12693 	mptsas_led_control_t lc;
12694 	mptsas_enclosure_t *mep;
12695 	uint16_t slotidx;
12696 
12697 	if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12698 		return (EFAULT);
12699 	}
12700 
12701 	if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12702 	    lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12703 	    lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12704 	    lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12705 	    (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12706 	    lc.LedStatus != 1)) {
12707 		return (EINVAL);
12708 	}
12709 
12710 	if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12711 	    (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12712 		return (EACCES);
12713 
12714 	/* Locate the required enclosure */
12715 	mutex_enter(&mpt->m_mutex);
12716 	mep = mptsas_enc_lookup(mpt, lc.Enclosure);
12717 	if (mep == NULL) {
12718 		mutex_exit(&mpt->m_mutex);
12719 		return (ENOENT);
12720 	}
12721 
12722 	if (lc.Slot < mep->me_fslot) {
12723 		mutex_exit(&mpt->m_mutex);
12724 		return (ENOENT);
12725 	}
12726 
12727 	/*
12728 	 * Slots on the enclosure are maintained in an array in which
12729 	 * me_fslot is entry zero.  We normalize the requested slot.
12730 	 */
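	/*
	 * For example, an enclosure whose first slot (me_fslot) is 1 and
	 * which has 24 slots maps requested slots 1-24 onto slotidx 0-23.
	 */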
12731 	slotidx = lc.Slot - mep->me_fslot;
12732 	if (slotidx >= mep->me_nslots) {
12733 		mutex_exit(&mpt->m_mutex);
12734 		return (ENOENT);
12735 	}
12736 
12737 	if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12738 		/* Update our internal LED state. */
12739 		mep->me_slotleds[slotidx] &= ~(1 << (lc.Led - 1));
12740 		mep->me_slotleds[slotidx] |= lc.LedStatus << (lc.Led - 1);
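		/*
		 * me_slotleds is a per-slot bitmask with LED number n kept in
		 * bit (n - 1); e.g. setting Led 2 to 1 turns on bit 1 for
		 * this slot before the mask is flushed to the controller.
		 */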
12741 
12742 		/* Flush it to the controller. */
12743 		ret = mptsas_flush_led_status(mpt, mep, slotidx);
12744 		mutex_exit(&mpt->m_mutex);
12745 		return (ret);
12746 	}
12747 
12748 	/* Return our internal LED state. */
12749 	lc.LedStatus = (mep->me_slotleds[slotidx] >> (lc.Led - 1)) & 1;
12750 	mutex_exit(&mpt->m_mutex);
12751 
12752 	if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12753 		return (EFAULT);
12754 	}
12755 
12756 	return (0);
12757 }
12758 
12759 static int
12760 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
12761 {
12762 	uint16_t i = 0;
12763 	uint16_t count = 0;
12764 	int ret = 0;
12765 	mptsas_target_t *ptgt;
12766 	mptsas_disk_info_t *di;
12767 	STRUCT_DECL(mptsas_get_disk_info, gdi);
12768 
12769 	if ((mode & FREAD) == 0)
12770 		return (EACCES);
12771 
12772 	STRUCT_INIT(gdi, get_udatamodel());
12773 
12774 	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
12775 	    mode) != 0) {
12776 		return (EFAULT);
12777 	}
12778 
12779 	/* Find out how many targets there are. */
12780 	mutex_enter(&mpt->m_mutex);
12781 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12782 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
12783 		count++;
12784 	}
12785 	mutex_exit(&mpt->m_mutex);
12786 
12787 	/*
12788 	 * If we haven't been asked to copy out information on each target,
12789 	 * then just return the count.
12790 	 */
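	/*
	 * This supports the usual two-pass pattern from userland: call once
	 * with PtrDiskInfoArray set to NULL to learn DiskCount, then call
	 * again with a suitably sized array.  A rough sketch, assuming the
	 * declarations in mptsas_ioctl.h (casts and error checking omitted):
	 *
	 *	mptsas_get_disk_info_t gdi = { 0 };
	 *	mptsas_disk_info_t *di;
	 *
	 *	(void) ioctl(fd, MPTIOCTL_GET_DISK_INFO, &gdi);
	 *	di = calloc(gdi.DiskCount, sizeof (*di));
	 *	gdi.PtrDiskInfoArray = di;
	 *	gdi.DiskInfoArraySize = gdi.DiskCount * sizeof (*di);
	 *	(void) ioctl(fd, MPTIOCTL_GET_DISK_INFO, &gdi);
	 */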
12791 	STRUCT_FSET(gdi, DiskCount, count);
12792 	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
12793 		goto copy_out;
12794 
12795 	/*
12796 	 * If we haven't been given a large enough buffer to copy out into,
12797 	 * let the caller know.
12798 	 */
12799 	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
12800 	    count * sizeof (mptsas_disk_info_t)) {
12801 		ret = ENOSPC;
12802 		goto copy_out;
12803 	}
12804 
12805 	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);
12806 
12807 	mutex_enter(&mpt->m_mutex);
12808 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12809 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
12810 		if (i >= count) {
12811 			/*
12812 			 * The number of targets changed while we weren't
12813 			 * looking, so give up.
12814 			 */
12815 			refhash_rele(mpt->m_targets, ptgt);
12816 			mutex_exit(&mpt->m_mutex);
12817 			kmem_free(di, count * sizeof (mptsas_disk_info_t));
12818 			return (EAGAIN);
12819 		}
12820 		di[i].Instance = mpt->m_instance;
12821 		di[i].Enclosure = ptgt->m_enclosure;
12822 		di[i].Slot = ptgt->m_slot_num;
12823 		di[i].SasAddress = ptgt->m_addr.mta_wwn;
12824 		i++;
12825 	}
12826 	mutex_exit(&mpt->m_mutex);
12827 	STRUCT_FSET(gdi, DiskCount, i);
12828 
12829 	/* Copy out the disk information to the caller. */
12830 	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
12831 	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
12832 		ret = EFAULT;
12833 	}
12834 
12835 	kmem_free(di, count * sizeof (mptsas_disk_info_t));
12836 
12837 copy_out:
12838 	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
12839 	    mode) != 0) {
12840 		ret = EFAULT;
12841 	}
12842 
12843 	return (ret);
12844 }
12845 
12846 static int
12847 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
12848     int *rval)
12849 {
12850 	int			status = 0;
12851 	mptsas_t		*mpt;
12852 	mptsas_update_flash_t	flashdata;
12853 	mptsas_pass_thru_t	passthru_data;
12854 	mptsas_adapter_data_t   adapter_data;
12855 	mptsas_pci_info_t	pci_info;
12856 	int			copylen;
12857 
12858 	int			iport_flag = 0;
12859 	dev_info_t		*dip = NULL;
12860 	mptsas_phymask_t	phymask = 0;
12861 	struct devctl_iocdata	*dcp = NULL;
12862 	char			*addr = NULL;
12863 	mptsas_target_t		*ptgt = NULL;
12864 
12865 	*rval = MPTIOCTL_STATUS_GOOD;
12866 	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
12867 		return (EPERM);
12868 	}
12869 
12870 	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
12871 	if (mpt == NULL) {
12872 		/*
12873 		 * Called from iport node, get the states
12874 		 */
12875 		iport_flag = 1;
12876 		dip = mptsas_get_dip_from_dev(dev, &phymask);
12877 		if (dip == NULL) {
12878 			return (ENXIO);
12879 		}
12880 		mpt = DIP2MPT(dip);
12881 	}
12882 	/* Make sure power level is D0 before accessing registers */
12883 	mutex_enter(&mpt->m_mutex);
12884 	if (mpt->m_options & MPTSAS_OPT_PM) {
12885 		(void) pm_busy_component(mpt->m_dip, 0);
12886 		if (mpt->m_power_level != PM_LEVEL_D0) {
12887 			mutex_exit(&mpt->m_mutex);
12888 			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12889 			    DDI_SUCCESS) {
12890 				mptsas_log(mpt, CE_WARN,
12891 				    "mptsas%d: mptsas_ioctl: Raise power "
12892 				    "request failed.", mpt->m_instance);
12893 				(void) pm_idle_component(mpt->m_dip, 0);
12894 				return (ENXIO);
12895 			}
12896 		} else {
12897 			mutex_exit(&mpt->m_mutex);
12898 		}
12899 	} else {
12900 		mutex_exit(&mpt->m_mutex);
12901 	}
12902 
12903 	if (iport_flag) {
12904 		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12905 		if (status != 0) {
12906 			goto out;
12907 		}
12908 		/*
12909 		 * The following code controls the OK2RM LED; it doesn't
12910 		 * affect the ioctl return status.
12911 		 */
12912 		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12913 		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
12914 			if (ndi_dc_allochdl((void *)data, &dcp) !=
12915 			    NDI_SUCCESS) {
12916 				goto out;
12917 			}
12918 			addr = ndi_dc_getaddr(dcp);
12919 			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12920 			if (ptgt == NULL) {
12921 				NDBG14(("mptsas_ioctl led control: tgt %s not "
12922 				    "found", addr));
12923 				ndi_dc_freehdl(dcp);
12924 				goto out;
12925 			}
12926 			ndi_dc_freehdl(dcp);
12927 		}
12928 		goto out;
12929 	}
12930 	switch (cmd) {
12931 		case MPTIOCTL_GET_DISK_INFO:
12932 			status = get_disk_info(mpt, data, mode);
12933 			break;
12934 		case MPTIOCTL_LED_CONTROL:
12935 			status = led_control(mpt, data, mode);
12936 			break;
12937 		case MPTIOCTL_UPDATE_FLASH:
12938 			if (ddi_copyin((void *)data, &flashdata,
12939 			    sizeof (struct mptsas_update_flash), mode)) {
12940 				status = EFAULT;
12941 				break;
12942 			}
12943 
12944 			mutex_enter(&mpt->m_mutex);
12945 			if (mptsas_update_flash(mpt,
12946 			    (caddr_t)(long)flashdata.PtrBuffer,
12947 			    flashdata.ImageSize, flashdata.ImageType, mode)) {
12948 				status = EFAULT;
12949 			}
12950 
12951 			/*
12952 			 * Reset the chip so that it starts using the new
12953 			 * firmware.  Reset even if the update failed.
12954 			 */
12955 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12956 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12957 				status = EFAULT;
12958 			}
12959 			mutex_exit(&mpt->m_mutex);
12960 			break;
12961 		case MPTIOCTL_PASS_THRU:
12962 			/*
12963 			 * The user has requested to pass through a command to
12964 			 * be executed by the MPT firmware.  Call our routine
12965 			 * which does this.  Only allow one passthru IOCTL at
12966 			 * a time.  Other threads will block on
12967 			 * m_passthru_mutex, which is an adaptive mutex.
12968 			 */
12969 			if (ddi_copyin((void *)data, &passthru_data,
12970 			    sizeof (mptsas_pass_thru_t), mode)) {
12971 				status = EFAULT;
12972 				break;
12973 			}
12974 			mutex_enter(&mpt->m_passthru_mutex);
12975 			mutex_enter(&mpt->m_mutex);
12976 			status = mptsas_pass_thru(mpt, &passthru_data, mode);
12977 			mutex_exit(&mpt->m_mutex);
12978 			mutex_exit(&mpt->m_passthru_mutex);
12979 
12980 			break;
12981 		case MPTIOCTL_GET_ADAPTER_DATA:
12982 			/*
12983 			 * The user has requested to read adapter data.  Call
12984 			 * our routine which does this.
12985 			 */
12986 			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
12987 			if (ddi_copyin((void *)data, (void *)&adapter_data,
12988 			    sizeof (mptsas_adapter_data_t), mode)) {
12989 				status = EFAULT;
12990 				break;
12991 			}
12992 			if (adapter_data.StructureLength >=
12993 			    sizeof (mptsas_adapter_data_t)) {
12994 				adapter_data.StructureLength = (uint32_t)
12995 				    sizeof (mptsas_adapter_data_t);
12996 				copylen = sizeof (mptsas_adapter_data_t);
12997 				mutex_enter(&mpt->m_mutex);
12998 				mptsas_read_adapter_data(mpt, &adapter_data);
12999 				mutex_exit(&mpt->m_mutex);
13000 			} else {
13001 				adapter_data.StructureLength = (uint32_t)
13002 				    sizeof (mptsas_adapter_data_t);
13003 				copylen = sizeof (adapter_data.StructureLength);
13004 				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
13005 			}
13006 			if (ddi_copyout((void *)(&adapter_data), (void *)data,
13007 			    copylen, mode) != 0) {
13008 				status = EFAULT;
13009 			}
13010 			break;
13011 		case MPTIOCTL_GET_PCI_INFO:
13012 			/*
13013 			 * The user has requested to read pci info.  Call
13014 			 * our routine which does this.
13015 			 */
13016 			bzero(&pci_info, sizeof (mptsas_pci_info_t));
13017 			mutex_enter(&mpt->m_mutex);
13018 			mptsas_read_pci_info(mpt, &pci_info);
13019 			mutex_exit(&mpt->m_mutex);
13020 			if (ddi_copyout((void *)(&pci_info), (void *)data,
13021 			    sizeof (mptsas_pci_info_t), mode) != 0) {
13022 				status = EFAULT;
13023 			}
13024 			break;
13025 		case MPTIOCTL_RESET_ADAPTER:
13026 			mutex_enter(&mpt->m_mutex);
13027 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
13028 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
13029 				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
13030 				    "failed");
13031 				status = EFAULT;
13032 			}
13033 			mutex_exit(&mpt->m_mutex);
13034 			break;
13035 		case MPTIOCTL_DIAG_ACTION:
13036 			/*
13037 			 * The user has done a diag buffer action.  Call our
13038 			 * routine which does this.  Only allow one diag action
13039 			 * at one time.
13040 			 */
13041 			mutex_enter(&mpt->m_mutex);
13042 			if (mpt->m_diag_action_in_progress) {
13043 				mutex_exit(&mpt->m_mutex);
13044 				return (EBUSY);
13045 			}
13046 			mpt->m_diag_action_in_progress = 1;
13047 			status = mptsas_diag_action(mpt,
13048 			    (mptsas_diag_action_t *)data, mode);
13049 			mpt->m_diag_action_in_progress = 0;
13050 			mutex_exit(&mpt->m_mutex);
13051 			break;
13052 		case MPTIOCTL_EVENT_QUERY:
13053 			/*
13054 			 * The user has done an event query. Call our routine
13055 			 * which does this.
13056 			 */
13057 			status = mptsas_event_query(mpt,
13058 			    (mptsas_event_query_t *)data, mode, rval);
13059 			break;
13060 		case MPTIOCTL_EVENT_ENABLE:
13061 			/*
13062 			 * The user has done an event enable. Call our routine
13063 			 * which does this.
13064 			 */
13065 			status = mptsas_event_enable(mpt,
13066 			    (mptsas_event_enable_t *)data, mode, rval);
13067 			break;
13068 		case MPTIOCTL_EVENT_REPORT:
13069 			/*
13070 			 * The user has done an event report. Call our routine
13071 			 * which does this.
13072 			 */
13073 			status = mptsas_event_report(mpt,
13074 			    (mptsas_event_report_t *)data, mode, rval);
13075 			break;
13076 		case MPTIOCTL_REG_ACCESS:
13077 			/*
13078 			 * The user has requested register access.  Call our
13079 			 * routine which does this.
13080 			 */
13081 			status = mptsas_reg_access(mpt,
13082 			    (mptsas_reg_access_t *)data, mode);
13083 			break;
13084 		default:
13085 			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
13086 			    rval);
13087 			break;
13088 	}
13089 
13090 out:
13091 	return (status);
13092 }
13093 
13094 int
13095 mptsas_restart_ioc(mptsas_t *mpt)
13096 {
13097 	int		rval = DDI_SUCCESS;
13098 	mptsas_target_t	*ptgt = NULL;
13099 
13100 	ASSERT(mutex_owned(&mpt->m_mutex));
13101 
13102 	/*
13103 	 * Set a flag telling I/O path that we're processing a reset.  This is
13104 	 * needed because after the reset is complete, the hash table still
13105 	 * needs to be rebuilt.  If I/Os are started before the hash table is
13106 	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
13107 	 * so that they can be retried.
13108 	 */
13109 	mpt->m_in_reset = TRUE;
13110 
13111 	/*
13112 	 * Wait until all the allocated sense data buffers for DMA are freed.
13113 	 */
13114 	while (mpt->m_extreq_sense_refcount > 0)
13115 		cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);
13116 
13117 	/*
13118 	 * Set all throttles to HOLD
13119 	 */
13120 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13121 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
13122 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
13123 	}
13124 
13125 	/*
13126 	 * Disable interrupts
13127 	 */
13128 	MPTSAS_DISABLE_INTR(mpt);
13129 
13130 	/*
13131 	 * Abort all commands: outstanding commands, commands in waitq and
13132 	 * tx_waitq.
13133 	 */
13134 	mptsas_flush_hba(mpt);
13135 
13136 	/*
13137 	 * Reinitialize the chip.
13138 	 */
13139 	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
13140 		rval = DDI_FAILURE;
13141 	}
13142 
13143 	/*
13144 	 * Enable interrupts again
13145 	 */
13146 	MPTSAS_ENABLE_INTR(mpt);
13147 
13148 	/*
13149 	 * If mptsas_init_chip was successful, update the driver data.
13150 	 */
13151 	if (rval == DDI_SUCCESS) {
13152 		mptsas_update_driver_data(mpt);
13153 	}
13154 
13155 	/*
13156 	 * Reset the throttles
13157 	 */
13158 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13159 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
13160 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
13161 	}
13162 
13163 	mptsas_doneq_empty(mpt);
13164 	mptsas_restart_hba(mpt);
13165 
13166 	if (rval != DDI_SUCCESS) {
13167 		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
13168 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
13169 	}
13170 
13171 	/*
13172 	 * Clear the reset flag so that I/Os can continue.
13173 	 */
13174 	mpt->m_in_reset = FALSE;
13175 
13176 	return (rval);
13177 }
13178 
13179 static int
13180 mptsas_init_chip(mptsas_t *mpt, int first_time)
13181 {
13182 	ddi_dma_cookie_t	cookie;
13183 	uint32_t		i;
13184 	int			rval;
13185 
13186 	/*
13187 	 * Check to see if the firmware image is valid
13188 	 */
13189 	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
13190 	    MPI2_DIAG_FLASH_BAD_SIG) {
13191 		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
13192 		goto fail;
13193 	}
13194 
13195 	/*
13196 	 * Reset the chip
13197 	 */
13198 	rval = mptsas_ioc_reset(mpt, first_time);
13199 	if (rval == MPTSAS_RESET_FAIL) {
13200 		mptsas_log(mpt, CE_WARN, "hard reset failed!");
13201 		goto fail;
13202 	}
13203 
13204 	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
13205 		goto mur;
13206 	}
13207 	/*
13208 	 * Setup configuration space
13209 	 */
13210 	if (mptsas_config_space_init(mpt) == FALSE) {
13211 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
13212 		    "failed!");
13213 		goto fail;
13214 	}
13215 
13216 	/*
13217 	 * IOC facts can change after a diag reset so all buffers that are
13218 	 * based on these numbers must be de-allocated and re-allocated.  Get
13219 	 * new IOC facts each time chip is initialized.
13220 	 */
13221 	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
13222 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
13223 		goto fail;
13224 	}
13225 
13226 	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
13227 		goto fail;
13228 	}
13229 	/*
13230 	 * Allocate request message frames, reply free queue, reply descriptor
13231 	 * post queue, and reply message frames using latest IOC facts.
13232 	 */
13233 	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
13234 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
13235 		goto fail;
13236 	}
13237 	if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
13238 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
13239 		goto fail;
13240 	}
13241 	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
13242 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
13243 		goto fail;
13244 	}
13245 	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
13246 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
13247 		goto fail;
13248 	}
13249 	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
13250 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
13251 		goto fail;
13252 	}
13253 
13254 mur:
13255 	/*
13256 	 * Re-Initialize ioc to operational state
13257 	 */
13258 	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
13259 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
13260 		goto fail;
13261 	}
13262 
13263 	mptsas_alloc_reply_args(mpt);
13264 
13265 	/*
13266 	 * Initialize reply post index.  Reply free index is initialized after
13267 	 * the next loop.
13268 	 */
13269 	mpt->m_post_index = 0;
13270 
13271 	/*
13272 	 * Initialize the Reply Free Queue with the physical addresses of our
13273 	 * reply frames.
13274 	 */
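	/*
	 * Each of the m_max_replies entries receives the bus address of one
	 * reply frame.  For example, with the frame pool at 0x1000 and a
	 * reply frame size of 128 bytes, the entries become 0x1000, 0x1080,
	 * 0x1100, and so on.
	 */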
13275 	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
13276 	for (i = 0; i < mpt->m_max_replies; i++) {
13277 		ddi_put32(mpt->m_acc_free_queue_hdl,
13278 		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
13279 		    cookie.dmac_address);
13280 		cookie.dmac_address += mpt->m_reply_frame_size;
13281 	}
13282 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
13283 	    DDI_DMA_SYNC_FORDEV);
13284 
13285 	/*
13286 	 * Initialize the reply free index to one past the last frame on the
13287 	 * queue.  This will signify that the queue is empty to start with.
13288 	 */
13289 	mpt->m_free_index = i;
13290 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
13291 
13292 	/*
13293 	 * Initialize each reply descriptor post queue entry to all ones.
13294 	 */
13295 	for (i = 0; i < mpt->m_post_queue_depth; i++) {
13296 		ddi_put64(mpt->m_acc_post_queue_hdl,
13297 		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
13298 		    0xFFFFFFFFFFFFFFFF);
13299 	}
13300 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
13301 	    DDI_DMA_SYNC_FORDEV);
13302 
13303 	/*
13304 	 * Enable ports
13305 	 */
13306 	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
13307 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
13308 		goto fail;
13309 	}
13310 
13311 	/*
13312 	 * enable events
13313 	 */
13314 	if (mptsas_ioc_enable_event_notification(mpt)) {
13315 		mptsas_log(mpt, CE_WARN,
13316 		    "mptsas_ioc_enable_event_notification failed");
13317 		goto fail;
13318 	}
13319 
13320 	/*
13321 	 * We need these checks here in addition to those in attach
13322 	 * because chip_init is called from multiple places.
13323 	 */
13324 
13325 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
13326 	    DDI_SUCCESS) ||
13327 	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
13328 	    DDI_SUCCESS) ||
13329 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
13330 	    DDI_SUCCESS) ||
13331 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
13332 	    DDI_SUCCESS) ||
13333 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
13334 	    DDI_SUCCESS) ||
13335 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
13336 	    DDI_SUCCESS)) {
13337 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
13338 		goto fail;
13339 	}
13340 
13341 	/* Check all acc handles */
13342 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
13343 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
13344 	    DDI_SUCCESS) ||
13345 	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
13346 	    DDI_SUCCESS) ||
13347 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
13348 	    DDI_SUCCESS) ||
13349 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
13350 	    DDI_SUCCESS) ||
13351 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
13352 	    DDI_SUCCESS) ||
13353 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
13354 	    DDI_SUCCESS) ||
13355 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
13356 	    DDI_SUCCESS)) {
13357 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
13358 		goto fail;
13359 	}
13360 
13361 	return (DDI_SUCCESS);
13362 
13363 fail:
13364 	return (DDI_FAILURE);
13365 }
13366 
13367 static int
13368 mptsas_get_pci_cap(mptsas_t *mpt)
13369 {
13370 	ushort_t caps_ptr, cap, cap_count;
13371 
13372 	if (mpt->m_config_handle == NULL)
13373 		return (FALSE);
13374 	/*
13375 	 * Check if capabilities list is supported and if so,
13376 	 * get initial capabilities pointer and clear bits 0,1.
13377 	 */
13378 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13379 	    & PCI_STAT_CAP) {
13380 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13381 		    PCI_CONF_CAP_PTR), 4);
13382 	} else {
13383 		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13384 	}
13385 
13386 	/*
13387 	 * Walk capabilities if supported.
13388 	 */
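	/*
	 * Each capability entry starts with an ID byte followed by a
	 * next-pointer byte and capability-specific data; e.g.
	 * PCI_CAP_ID_PM is 0x01 and PCI_CAP_ID_MSI is 0x05.
	 */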
13389 	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13390 
13391 		/*
13392 		 * Check that we haven't exceeded the maximum number of
13393 		 * capabilities and that the pointer is in a valid range.
13394 		 */
13395 		if (++cap_count > 48) {
13396 			mptsas_log(mpt, CE_WARN,
13397 			    "too many device capabilities.\n");
13398 			break;
13399 		}
13400 		if (caps_ptr < 64) {
13401 			mptsas_log(mpt, CE_WARN,
13402 			    "capabilities pointer 0x%x out of range.\n",
13403 			    caps_ptr);
13404 			break;
13405 		}
13406 
13407 		/*
13408 		 * Get next capability and check that it is valid.
13409 		 * For now, we only support power management.
13410 		 */
13411 		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13412 		switch (cap) {
13413 			case PCI_CAP_ID_PM:
13414 				mptsas_log(mpt, CE_NOTE,
13415 				    "?mptsas%d supports power management.\n",
13416 				    mpt->m_instance);
13417 				mpt->m_options |= MPTSAS_OPT_PM;
13418 
13419 				/* Save PMCSR offset */
13420 				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13421 				break;
13422 			/*
13423 			 * The following capabilities are valid.  Any others
13424 			 * will cause a message to be logged.
13425 			 */
13426 			case PCI_CAP_ID_VPD:
13427 			case PCI_CAP_ID_MSI:
13428 			case PCI_CAP_ID_PCIX:
13429 			case PCI_CAP_ID_PCI_E:
13430 			case PCI_CAP_ID_MSI_X:
13431 				break;
13432 			default:
13433 				mptsas_log(mpt, CE_NOTE,
13434 				    "?mptsas%d unrecognized capability "
13435 				    "0x%x.\n", mpt->m_instance, cap);
13436 				break;
13437 		}
13438 
13439 		/*
13440 		 * Get next capabilities pointer and clear bits 0,1.
13441 		 */
13442 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13443 		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13444 	}
13445 	return (TRUE);
13446 }
13447 
13448 static int
13449 mptsas_init_pm(mptsas_t *mpt)
13450 {
13451 	char		pmc_name[16];
13452 	char		*pmc[] = {
13453 				NULL,
13454 				"0=Off (PCI D3 State)",
13455 				"3=On (PCI D0 State)",
13456 				NULL
13457 			};
13458 	uint16_t	pmcsr_stat;
13459 
13460 	if (mptsas_get_pci_cap(mpt) == FALSE) {
13461 		return (DDI_FAILURE);
13462 	}
13463 	/*
13464 	 * If the PCI capabilities don't include power management, there
13465 	 * is no need to register the pm-components property.
13466 	 */
13467 	if (!(mpt->m_options & MPTSAS_OPT_PM))
13468 		return (DDI_SUCCESS);
13469 	/*
13470 	 * If power management is supported by this chip, create
13471 	 * pm-components property for the power management framework
13472 	 */
13473 	(void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13474 	pmc[0] = pmc_name;
13475 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13476 	    "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13477 		mpt->m_options &= ~MPTSAS_OPT_PM;
13478 		mptsas_log(mpt, CE_WARN,
13479 		    "mptsas%d: pm-component property creation failed.",
13480 		    mpt->m_instance);
13481 		return (DDI_FAILURE);
13482 	}
13483 
13484 	/*
13485 	 * Power on device.
13486 	 */
13487 	(void) pm_busy_component(mpt->m_dip, 0);
13488 	pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13489 	    mpt->m_pmcsr_offset);
13490 	if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13491 		mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13492 		    mpt->m_instance);
13493 		pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13494 		    PCI_PMCSR_D0);
13495 	}
13496 	if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13497 		mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13498 		return (DDI_FAILURE);
13499 	}
13500 	mpt->m_power_level = PM_LEVEL_D0;
13501 	/*
13502 	 * Set pm idle delay.
13503 	 */
13504 	mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13505 	    mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13506 
13507 	return (DDI_SUCCESS);
13508 }
13509 
13510 static int
13511 mptsas_register_intrs(mptsas_t *mpt)
13512 {
13513 	dev_info_t *dip;
13514 	int intr_types;
13515 
13516 	dip = mpt->m_dip;
13517 
13518 	/* Get supported interrupt types */
13519 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13520 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13521 		    "failed\n");
13522 		return (FALSE);
13523 	}
13524 
13525 	NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13526 
13527 	/*
13528 	 * Try MSI, but fall back to FIXED
13529 	 */
13530 	if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13531 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13532 			NDBG0(("Using MSI interrupt type"));
13533 			mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13534 			return (TRUE);
13535 		}
13536 	}
13537 	if (intr_types & DDI_INTR_TYPE_FIXED) {
13538 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13539 			NDBG0(("Using FIXED interrupt type"));
13540 			mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13541 			return (TRUE);
13542 		} else {
13543 			NDBG0(("FIXED interrupt registration failed"));
13544 			return (FALSE);
13545 		}
13546 	}
13547 
13548 	return (FALSE);
13549 }
13550 
13551 static void
13552 mptsas_unregister_intrs(mptsas_t *mpt)
13553 {
13554 	mptsas_rem_intrs(mpt);
13555 }
13556 
13557 /*
13558  * mptsas_add_intrs:
13559  *
13560  * Register FIXED or MSI interrupts.
13561  */
13562 static int
13563 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13564 {
13565 	dev_info_t	*dip = mpt->m_dip;
13566 	int		avail, actual, count = 0;
13567 	int		i, flag, ret;
13568 
13569 	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13570 
13571 	/* Get number of interrupts */
13572 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13573 	if ((ret != DDI_SUCCESS) || (count <= 0)) {
13574 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13575 		    "ret %d count %d\n", ret, count);
13576 
13577 		return (DDI_FAILURE);
13578 	}
13579 
13580 	/* Get number of available interrupts */
13581 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
13582 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
13583 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13584 		    "ret %d avail %d\n", ret, avail);
13585 
13586 		return (DDI_FAILURE);
13587 	}
13588 
13589 	if (avail < count) {
13590 		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nintrs() returned %d, "
13591 		    "ddi_intr_get_navail() returned %d", count, avail);
13592 	}
13593 
13594 	/* The mpt driver only has one interrupt routine */
13595 	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13596 		count = 1;
13597 	}
13598 
13599 	/* Allocate an array of interrupt handles */
13600 	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13601 	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13602 
13603 	flag = DDI_INTR_ALLOC_NORMAL;
13604 
13605 	/* call ddi_intr_alloc() */
13606 	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13607 	    count, &actual, flag);
13608 
13609 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
13610 		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13611 		    ret);
13612 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13613 		return (DDI_FAILURE);
13614 	}
13615 
13616 	/* use interrupt count returned or abort? */
13617 	if (actual < count) {
13618 		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13619 		    count, actual);
13620 	}
13621 
13622 	mpt->m_intr_cnt = actual;
13623 
13624 	/*
13625 	 * Get priority for first msi, assume remaining are all the same
13626 	 */
13627 	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13628 	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
13629 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13630 
13631 		/* Free already allocated intr */
13632 		for (i = 0; i < actual; i++) {
13633 			(void) ddi_intr_free(mpt->m_htable[i]);
13634 		}
13635 
13636 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13637 		return (DDI_FAILURE);
13638 	}
13639 
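	/*
	 * Note (explanatory): interrupt handlers that run at or above the
	 * high-level priority may not use ordinary adaptive mutexes, which
	 * this driver depends on, so such interrupts are declined below.
	 */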
13640 	/* Test for high level mutex */
13641 	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13642 		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13643 		    "Hi level interrupt not supported\n");
13644 
13645 		/* Free already allocated intr */
13646 		for (i = 0; i < actual; i++) {
13647 			(void) ddi_intr_free(mpt->m_htable[i]);
13648 		}
13649 
13650 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13651 		return (DDI_FAILURE);
13652 	}
13653 
13654 	/* Call ddi_intr_add_handler() */
13655 	for (i = 0; i < actual; i++) {
13656 		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13657 		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13658 			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13659 			    "failed %d\n", ret);
13660 
13661 			/* Free already allocated intr */
13662 			for (i = 0; i < actual; i++) {
13663 				(void) ddi_intr_free(mpt->m_htable[i]);
13664 			}
13665 
13666 			kmem_free(mpt->m_htable, mpt->m_intr_size);
13667 			return (DDI_FAILURE);
13668 		}
13669 	}
13670 
13671 	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13672 	    != DDI_SUCCESS) {
13673 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13674 
13675 		/* Free already allocated intr */
13676 		for (i = 0; i < actual; i++) {
13677 			(void) ddi_intr_free(mpt->m_htable[i]);
13678 		}
13679 
13680 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13681 		return (DDI_FAILURE);
13682 	}
13683 
13684 	/*
13685 	 * Enable interrupts
13686 	 */
13687 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13688 		/* Call ddi_intr_block_enable() for MSI interrupts */
13689 		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13690 	} else {
13691 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
13692 		for (i = 0; i < mpt->m_intr_cnt; i++) {
13693 			(void) ddi_intr_enable(mpt->m_htable[i]);
13694 		}
13695 	}
13696 	return (DDI_SUCCESS);
13697 }
13698 
13699 /*
13700  * mptsas_rem_intrs:
13701  *
13702  * Unregister FIXED or MSI interrupts
13703  */
13704 static void
13705 mptsas_rem_intrs(mptsas_t *mpt)
13706 {
13707 	int	i;
13708 
13709 	NDBG6(("mptsas_rem_intrs"));
13710 
13711 	/* Disable all interrupts */
13712 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13713 		/* Call ddi_intr_block_disable() */
13714 		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13715 	} else {
13716 		for (i = 0; i < mpt->m_intr_cnt; i++) {
13717 			(void) ddi_intr_disable(mpt->m_htable[i]);
13718 		}
13719 	}
13720 
13721 	/* Call ddi_intr_remove_handler() */
13722 	for (i = 0; i < mpt->m_intr_cnt; i++) {
13723 		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
13724 		(void) ddi_intr_free(mpt->m_htable[i]);
13725 	}
13726 
13727 	kmem_free(mpt->m_htable, mpt->m_intr_size);
13728 }
13729 
13730 /*
13731  * The IO fault service error handling callback function
13732  */
13733 /*ARGSUSED*/
13734 static int
13735 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
13736 {
13737 	/*
13738 	 * as the driver can always deal with an error in any dma or
13739 	 * access handle, we can just return the fme_status value.
13740 	 */
13741 	pci_ereport_post(dip, err, NULL);
13742 	return (err->fme_status);
13743 }
13744 
13745 /*
13746  * mptsas_fm_init - initialize fma capabilities and register with IO
13747  *               fault services.
13748  */
13749 static void
13750 mptsas_fm_init(mptsas_t *mpt)
13751 {
13752 	/*
13753 	 * Need to change iblock to priority for new MSI intr
13754 	 */
13755 	ddi_iblock_cookie_t	fm_ibc;
13756 
13757 	/* Only register with IO Fault Services if we have some capability */
13758 	if (mpt->m_fm_capabilities) {
13759 		/* Adjust access and dma attributes for FMA */
13760 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13761 		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13762 		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13763 
13764 		/*
13765 		 * Register capabilities with IO Fault Services.
13766 		 * mpt->m_fm_capabilities will be updated to indicate
13767 		 * capabilities actually supported (not requested.)
13768 		 */
13769 		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13770 
13771 		/*
13772 		 * Initialize pci ereport capabilities if ereport
13773 		 * capable (should always be.)
13774 		 */
13775 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13776 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13777 			pci_ereport_setup(mpt->m_dip);
13778 		}
13779 
13780 		/*
13781 		 * Register error callback if error callback capable.
13782 		 */
13783 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13784 			ddi_fm_handler_register(mpt->m_dip,
13785 			    mptsas_fm_error_cb, (void *) mpt);
13786 		}
13787 	}
13788 }
13789 
13790 /*
13791  * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13792  *               fault services.
13793  *
13794  */
13795 static void
13796 mptsas_fm_fini(mptsas_t *mpt)
13797 {
13798 	/* Only unregister FMA capabilities if registered */
13799 	if (mpt->m_fm_capabilities) {
13800 
13801 		/*
13802 		 * Un-register error callback if error callback capable.
13803 		 */
13804 
13805 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13806 			ddi_fm_handler_unregister(mpt->m_dip);
13807 		}
13808 
13809 		/*
13810 		 * Release any resources allocated by pci_ereport_setup()
13811 		 */
13812 
13813 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13814 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13815 			pci_ereport_teardown(mpt->m_dip);
13816 		}
13817 
13818 		/* Unregister from IO Fault Services */
13819 		ddi_fm_fini(mpt->m_dip);
13820 
13821 		/* Adjust access and dma attributes for FMA */
13822 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13823 		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13824 		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13825 
13826 	}
13827 }
13828 
13829 int
13830 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13831 {
13832 	ddi_fm_error_t	de;
13833 
13834 	if (handle == NULL)
13835 		return (DDI_FAILURE);
13836 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13837 	return (de.fme_status);
13838 }
13839 
13840 int
13841 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13842 {
13843 	ddi_fm_error_t	de;
13844 
13845 	if (handle == NULL)
13846 		return (DDI_FAILURE);
13847 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13848 	return (de.fme_status);
13849 }
13850 
13851 void
13852 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13853 {
13854 	uint64_t	ena;
13855 	char		buf[FM_MAX_CLASS];
13856 
13857 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13858 	ena = fm_ena_generate(0, FM_ENA_FMT1);
13859 	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13860 		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13861 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13862 	}
13863 }
13864 
13865 static int
13866 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13867     uint16_t *dev_handle, mptsas_target_t **pptgt)
13868 {
13869 	int		rval;
13870 	uint32_t	dev_info;
13871 	uint64_t	sas_wwn;
13872 	mptsas_phymask_t phymask;
13873 	uint8_t		physport, phynum, config, disk;
13874 	uint64_t	devicename;
13875 	uint16_t	pdev_hdl;
13876 	mptsas_target_t	*tmp_tgt = NULL;
13877 	uint16_t	bay_num, enclosure, io_flags;
13878 
13879 	ASSERT(*pptgt == NULL);
13880 
13881 	rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13882 	    &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13883 	    &bay_num, &enclosure, &io_flags);
13884 	if (rval != DDI_SUCCESS) {
13885 		rval = DEV_INFO_FAIL_PAGE0;
13886 		return (rval);
13887 	}
13888 
13889 	if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13890 	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13891 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == 0) {
13892 		rval = DEV_INFO_WRONG_DEVICE_TYPE;
13893 		return (rval);
13894 	}
13895 
13896 	/*
13897 	 * Check if the dev handle is for a Phys Disk. If so, set return value
13898 	 * and exit.  Don't add Phys Disks to hash.
13899 	 */
13900 	for (config = 0; config < mpt->m_num_raid_configs; config++) {
13901 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13902 			if (*dev_handle == mpt->m_raidconfig[config].
13903 			    m_physdisk_devhdl[disk]) {
13904 				rval = DEV_INFO_PHYS_DISK;
13905 				return (rval);
13906 			}
13907 		}
13908 	}
13909 
13910 	/*
13911 	 * Get the SATA Device Name from SAS device page0 for a SATA
13912 	 * device. If the device name doesn't exist, set mta_wwn to 0
13913 	 * for direct-attached SATA; for a device behind an expander we
13914 	 * can still use the STP address assigned by the expander.
13915 	 */
13916 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13917 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13918 		/* alloc a temporary target to send the cmd to */
13919 		tmp_tgt = mptsas_tgt_alloc(mpt->m_tmp_targets, *dev_handle,
13920 		    0, dev_info, 0, 0);
13921 		mutex_exit(&mpt->m_mutex);
13922 
13923 		devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13924 
13925 		if (devicename == -1) {
13926 			mutex_enter(&mpt->m_mutex);
13927 			refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13928 			rval = DEV_INFO_FAIL_GUID;
13929 			return (rval);
13930 		}
13931 
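		/*
		 * A device name whose top nibble is 5 is an IEEE Registered
		 * (NAA 5) identifier, e.g. 0x5000c5000b21ac80 (an
		 * illustrative value); only such names are adopted as the
		 * target's SAS address here.
		 */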
13932 		if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13933 			sas_wwn = devicename;
13934 		} else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13935 			sas_wwn = 0;
13936 		}
13937 
13938 		mutex_enter(&mpt->m_mutex);
13939 		refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13940 	}
13941 
13942 	phymask = mptsas_physport_to_phymask(mpt, physport);
13943 	*pptgt = mptsas_tgt_alloc(mpt->m_targets, *dev_handle, sas_wwn,
13944 	    dev_info, phymask, phynum);
13945 	if (*pptgt == NULL) {
13946 		mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13947 		    "structure!");
13948 		rval = DEV_INFO_FAIL_ALLOC;
13949 		return (rval);
13950 	}
13951 	(*pptgt)->m_io_flags = io_flags;
13952 	(*pptgt)->m_enclosure = enclosure;
13953 	(*pptgt)->m_slot_num = bay_num;
13954 	return (DEV_INFO_SUCCESS);
13955 }
13956 
13957 uint64_t
13958 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
13959 {
13960 	uint64_t	sata_guid = 0, *pwwn = NULL;
13961 	int		target = ptgt->m_devhdl;
13962 	uchar_t		*inq83 = NULL;
13963 	int		inq83_len = 0xFF;
13964 	uchar_t		*dblk = NULL;
13965 	int		inq83_retry = 3;
13966 	int		rval = DDI_FAILURE;
13967 
13968 	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);
13969 
13970 inq83_retry:
13971 	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
13972 	    inq83_len, NULL, 1);
13973 	if (rval != DDI_SUCCESS) {
13974 		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
13975 		    "0x83 for target:%x, lun:%x failed!", target, lun);
13976 		sata_guid = -1;
13977 		goto out;
13978 	}
13979 	/* According to SAT-2, the first descriptor is the logical unit name */
13980 	dblk = &inq83[4];
13981 	if ((dblk[1] & 0x30) != 0) {
13982 		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
13983 		goto out;
13984 	}
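	/*
	 * dblk[4] is the first byte of the designator itself: an NAA 5
	 * identifier starts with a 0x5 high nibble, while an ASCII 'A'
	 * suggests a T10 vendor-ID designator (typically "ATA     ..."),
	 * meaning the drive exposes no NAA-format GUID.
	 */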
13985 	pwwn = (uint64_t *)(void *)(&dblk[4]);
13986 	if ((dblk[4] & 0xf0) == 0x50) {
13987 		sata_guid = BE_64(*pwwn);
13988 		goto out;
13989 	} else if (dblk[4] == 'A') {
13990 		NDBG20(("SATA drive has no NAA format GUID."));
13991 		goto out;
13992 	} else {
13993 		/* The data is not ready, wait and retry */
13994 		inq83_retry--;
13995 		if (inq83_retry <= 0) {
13996 			goto out;
13997 		}
13998 		NDBG20(("The GUID is not ready, retry..."));
13999 		delay(1 * drv_usectohz(1000000));
14000 		goto inq83_retry;
14001 	}
14002 out:
14003 	kmem_free(inq83, inq83_len);
14004 	return (sata_guid);
14005 }
14006 
14007 static int
14008 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
14009     unsigned char *buf, int len, int *reallen, uchar_t evpd)
14010 {
14011 	uchar_t			cdb[CDB_GROUP0];
14012 	struct scsi_address	ap;
14013 	struct buf		*data_bp = NULL;
14014 	int			resid = 0;
14015 	int			ret = DDI_FAILURE;
14016 
14017 	ASSERT(len <= 0xffff);
14018 
14019 	ap.a_target = MPTSAS_INVALID_DEVHDL;
14020 	ap.a_lun = (uchar_t)(lun);
14021 	ap.a_hba_tran = mpt->m_tran;
14022 
14023 	data_bp = scsi_alloc_consistent_buf(&ap,
14024 	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
14025 	if (data_bp == NULL) {
14026 		return (ret);
14027 	}
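	/*
	 * Build a 6-byte INQUIRY CDB: byte 1 carries the EVPD flag, byte 2
	 * the page code, and bytes 3-4 the allocation length in big-endian
	 * order.
	 */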
14028 	bzero(cdb, CDB_GROUP0);
14029 	cdb[0] = SCMD_INQUIRY;
14030 	cdb[1] = evpd;
14031 	cdb[2] = page;
14032 	cdb[3] = (len & 0xff00) >> 8;
14033 	cdb[4] = (len & 0x00ff);
14034 	cdb[5] = 0;
14035 
14036 	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
14037 	    &resid);
14038 	if (ret == DDI_SUCCESS) {
14039 		if (reallen) {
14040 			*reallen = len - resid;
14041 		}
14042 		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
14043 	}
14044 	if (data_bp) {
14045 		scsi_free_consistent_buf(data_bp);
14046 	}
14047 	return (ret);
14048 }
14049 
14050 static int
14051 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
14052     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
14053     int *resid)
14054 {
14055 	struct scsi_pkt		*pktp = NULL;
14056 	scsi_hba_tran_t		*tran_clone = NULL;
14057 	mptsas_tgt_private_t	*tgt_private = NULL;
14058 	int			ret = DDI_FAILURE;
14059 
14060 	/*
14061 	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
14062 	 * information to scsi_init_pkt(); allocate a scsi_hba_tran
14063 	 * structure to simulate the commands coming from sd.
14064 	 */
14065 	tran_clone = kmem_alloc(
14066 	    sizeof (scsi_hba_tran_t), KM_SLEEP);
14067 	if (tran_clone == NULL) {
14068 		goto out;
14069 	}
14070 	bcopy((caddr_t)mpt->m_tran,
14071 	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
14072 	tgt_private = kmem_alloc(
14073 	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
14074 	if (tgt_private == NULL) {
14075 		goto out;
14076 	}
14077 	tgt_private->t_lun = ap->a_lun;
14078 	tgt_private->t_private = ptgt;
14079 	tran_clone->tran_tgt_private = tgt_private;
14080 	ap->a_hba_tran = tran_clone;
14081 
14082 	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
14083 	    data_bp, cdblen, sizeof (struct scsi_arq_status),
14084 	    0, PKT_CONSISTENT, NULL, NULL);
14085 	if (pktp == NULL) {
14086 		goto out;
14087 	}
14088 	bcopy(cdb, pktp->pkt_cdbp, cdblen);
14089 	pktp->pkt_flags = FLAG_NOPARITY;
14090 	if (scsi_poll(pktp) < 0) {
14091 		goto out;
14092 	}
14093 	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
14094 		goto out;
14095 	}
14096 	if (resid != NULL) {
14097 		*resid = pktp->pkt_resid;
14098 	}
14099 
14100 	ret = DDI_SUCCESS;
14101 out:
14102 	if (pktp) {
14103 		scsi_destroy_pkt(pktp);
14104 	}
14105 	if (tran_clone) {
14106 		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
14107 	}
14108 	if (tgt_private) {
14109 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14110 	}
14111 	return (ret);
14112 }
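
/*
 * Parse a unit address of the form "w<WWN>,<LUN>" or "p<PHY>,<LUN>",
 * e.g. "w5000c5000b21ac800,0" or "p3,0" (illustrative values); both the
 * WWN/PHY portion and the LUN are interpreted as hexadecimal.
 */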
14113 static int
14114 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
14115 {
14116 	char	*cp = NULL;
14117 	char	*ptr = NULL;
14118 	size_t	s = 0;
14119 	char	*wwid_str = NULL;
14120 	char	*lun_str = NULL;
14121 	long	lunnum;
14122 	long	phyid = -1;
14123 	int	rc = DDI_FAILURE;
14124 
14125 	ptr = name;
14126 	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
14127 	ptr++;
14128 	if ((cp = strchr(ptr, ',')) == NULL) {
14129 		return (DDI_FAILURE);
14130 	}
14131 
14132 	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14133 	s = (uintptr_t)cp - (uintptr_t)ptr;
14134 
14135 	bcopy(ptr, wwid_str, s);
14136 	wwid_str[s] = '\0';
14137 
14138 	ptr = ++cp;
14139 
14140 	if ((cp = strchr(ptr, '\0')) == NULL) {
14141 		goto out;
14142 	}
14143 	lun_str =  kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14144 	s = (uintptr_t)cp - (uintptr_t)ptr;
14145 
14146 	bcopy(ptr, lun_str, s);
14147 	lun_str[s] = '\0';
14148 
14149 	if (name[0] == 'p') {
14150 		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
14151 	} else {
14152 		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
14153 	}
14154 	if (rc != DDI_SUCCESS)
14155 		goto out;
14156 
14157 	if (phyid != -1) {
14158 		ASSERT(phyid < MPTSAS_MAX_PHYS);
14159 		*phy = (uint8_t)phyid;
14160 	}
14161 	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
14162 	if (rc != 0)
14163 		goto out;
14164 
14165 	*lun = (int)lunnum;
14166 	rc = DDI_SUCCESS;
14167 out:
14168 	if (wwid_str)
14169 		kmem_free(wwid_str, SCSI_MAXNAMELEN);
14170 	if (lun_str)
14171 		kmem_free(lun_str, SCSI_MAXNAMELEN);
14172 
14173 	return (rc);
14174 }
14175 
14176 /*
14177  * mptsas_parse_smp_name() parses a SAS WWN string
14178  * of the form "wWWN".
14179  */
14180 static int
14181 mptsas_parse_smp_name(char *name, uint64_t *wwn)
14182 {
14183 	char	*ptr = name;
14184 
14185 	if (*ptr != 'w') {
14186 		return (DDI_FAILURE);
14187 	}
14188 
14189 	ptr++;
14190 	if (scsi_wwnstr_to_wwn(ptr, wwn)) {
14191 		return (DDI_FAILURE);
14192 	}
14193 	return (DDI_SUCCESS);
14194 }
14195 
14196 static int
14197 mptsas_bus_config(dev_info_t *pdip, uint_t flag,
14198     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
14199 {
14200 	int		ret = NDI_FAILURE;
14201 	mptsas_t	*mpt;
14202 	char		*ptr = NULL;
14203 	char		*devnm = NULL;
14204 	uint64_t	wwid = 0;
14205 	uint8_t		phy = 0xFF;
14206 	int		lun = 0;
14207 	uint_t		mflags = flag;
14208 	int		bconfig = TRUE;
14209 
14210 	if (scsi_hba_iport_unit_address(pdip) == 0) {
14211 		return (DDI_FAILURE);
14212 	}
14213 
14214 	mpt = DIP2MPT(pdip);
14215 	if (!mpt) {
14216 		return (DDI_FAILURE);
14217 	}
14218 	/*
14219 	 * Hold the nexus across the bus_config
14220 	 */
14221 	ndi_devi_enter(scsi_vhci_dip);
14222 	ndi_devi_enter(pdip);
14223 	switch (op) {
14224 	case BUS_CONFIG_ONE:
14225 		/* parse wwid/target name out of name given */
14226 		if ((ptr = strchr((char *)arg, '@')) == NULL) {
14227 			ret = NDI_FAILURE;
14228 			break;
14229 		}
14230 		ptr++;
14231 		if (strncmp((char *)arg, "smp", 3) == 0) {
14232 			/*
14233 			 * This is a SMP target device
14234 			 */
14235 			ret = mptsas_parse_smp_name(ptr, &wwid);
14236 			if (ret != DDI_SUCCESS) {
14237 				ret = NDI_FAILURE;
14238 				break;
14239 			}
14240 			ret = mptsas_config_smp(pdip, wwid, childp);
14241 		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
14242 			/*
14243 			 * OBP could pass down a non-canonical bootpath
14244 			 * without the LUN part when the LUN is 0, so the
14245 			 * driver needs to adjust the string.
14246 			 */
14247 			if (strchr(ptr, ',') == NULL) {
14248 				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14249 				(void) sprintf(devnm, "%s,0", (char *)arg);
14250 				ptr = strchr(devnm, '@');
14251 				ptr++;
14252 			}
14253 
14254 			/*
14255 			 * The device path is in wWWID format and the
14256 			 * device is not an SMP target device.
14257 			 */
14258 			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
14259 			if (ret != DDI_SUCCESS) {
14260 				ret = NDI_FAILURE;
14261 				break;
14262 			}
14263 			*childp = NULL;
14264 			if (ptr[0] == 'w') {
14265 				ret = mptsas_config_one_addr(pdip, wwid,
14266 				    lun, childp);
14267 			} else if (ptr[0] == 'p') {
14268 				ret = mptsas_config_one_phy(pdip, phy, lun,
14269 				    childp);
14270 			}
14271 
14272 			/*
14273 			 * If this is a CD/DVD device in the OBP path,
14274 			 * ndi_busop_bus_config() can be skipped since the
14275 			 * config-one operation was already done above.
14276 			 */
14277 			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
14278 			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
14279 			    (strncmp((char *)arg, "disk", 4) == 0)) {
14280 				bconfig = FALSE;
14281 				ndi_hold_devi(*childp);
14282 			}
14283 		} else {
14284 			ret = NDI_FAILURE;
14285 			break;
14286 		}
14287 
14288 		/*
14289 		 * DDI group instructed us to use this flag.
14290 		 */
14291 		mflags |= NDI_MDI_FALLBACK;
14292 		break;
14293 	case BUS_CONFIG_DRIVER:
14294 	case BUS_CONFIG_ALL:
14295 		mptsas_config_all(pdip);
14296 		ret = NDI_SUCCESS;
14297 		break;
14298 	default:
14299 		ret = NDI_FAILURE;
14300 		break;
14301 	}
14302 
14303 	if ((ret == NDI_SUCCESS) && bconfig) {
14304 		ret = ndi_busop_bus_config(pdip, mflags, op,
14305 		    (devnm == NULL) ? arg : devnm, childp, 0);
14306 	}
14307 
14308 	ndi_devi_exit(pdip);
14309 	ndi_devi_exit(scsi_vhci_dip);
14310 	if (devnm != NULL)
14311 		kmem_free(devnm, SCSI_MAXNAMELEN);
14312 	return (ret);
14313 }
14314 
14315 static int
14316 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14317     mptsas_target_t *ptgt)
14318 {
14319 	int			rval = DDI_FAILURE;
14320 	struct scsi_inquiry	*sd_inq = NULL;
14321 	mptsas_t		*mpt = DIP2MPT(pdip);
14322 
14323 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14324 
14325 	rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
14326 	    SUN_INQSIZE, 0, (uchar_t)0);
14327 
14328 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14329 		rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14330 	} else {
14331 		rval = DDI_FAILURE;
14332 	}
14333 
14334 	kmem_free(sd_inq, SUN_INQSIZE);
14335 	return (rval);
14336 }
14337 
14338 static int
14339 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
14340     dev_info_t **lundip)
14341 {
14342 	int		rval;
14343 	mptsas_t		*mpt = DIP2MPT(pdip);
14344 	int		phymask;
14345 	mptsas_target_t	*ptgt = NULL;
14346 
14347 	/*
14348 	 * Get the physical port associated to the iport
14349 	 */
14350 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14351 	    "phymask", 0);
14352 
14353 	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
14354 	if (ptgt == NULL) {
14355 		/*
14356 		 * no device matched the search
14357 		 */
14358 		return (DDI_FAILURE);
14359 	}
14360 	/*
14361 	 * If the LUN already exists and the status is online,
14362 	 * we just return the pointer to dev_info_t directly.
14363 	 * For the mdi_pathinfo node, we'll handle it in
14364 	 * mptsas_create_virt_lun()
14365 	 * TODO should be also in mptsas_handle_dr
14366 	 */
14367 
14368 	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
14369 	if (*lundip != NULL) {
14370 		/*
14371 		 * TODO Another scenario: we hotplug the same disk
14372 		 * in the same slot and the devhdl changes; is this
14373 		 * possible?
14374 		 * tgt_private->t_private != ptgt
14375 		 */
14376 		if (sasaddr != ptgt->m_addr.mta_wwn) {
14377 			/*
14378 			 * The device has changed although the devhdl is the
14379 			 * same (Enclosure mapping mode, change drive on the
14380 			 * same slot)
14381 			 */
14382 			return (DDI_FAILURE);
14383 		}
14384 		return (DDI_SUCCESS);
14385 	}
14386 
14387 	if (phymask == 0) {
14388 		/*
14389 		 * Configure IR volume
14390 		 */
14391 		rval =  mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
14392 		return (rval);
14393 	}
14394 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14395 
14396 	return (rval);
14397 }
14398 
14399 static int
14400 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14401     dev_info_t **lundip)
14402 {
14403 	int		rval;
14404 	mptsas_t	*mpt = DIP2MPT(pdip);
14405 	mptsas_phymask_t phymask;
14406 	mptsas_target_t	*ptgt = NULL;
14407 
14408 	/*
14409 	 * Get the physical port associated to the iport
14410 	 */
14411 	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14412 	    "phymask", 0);
14413 
14414 	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14415 	if (ptgt == NULL) {
14416 		/*
14417 		 * no device matched the search
14418 		 */
14419 		return (DDI_FAILURE);
14420 	}
14421 
14422 	/*
14423 	 * If the LUN already exists and the status is online,
14424 	 * we just return the pointer to dev_info_t directly.
14425 	 * For the mdi_pathinfo node, we'll handle it in
14426 	 * mptsas_create_virt_lun().
14427 	 */
14428 
14429 	*lundip = mptsas_find_child_phy(pdip, phy);
14430 	if (*lundip != NULL) {
14431 		return (DDI_SUCCESS);
14432 	}
14433 
14434 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14435 
14436 	return (rval);
14437 }
14438 
14439 static int
14440 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
14441     uint8_t *lun_addr_type)
14442 {
14443 	uint32_t	lun_idx = 0;
14444 
14445 	ASSERT(lun_num != NULL);
14446 	ASSERT(lun_addr_type != NULL);
14447 
14448 	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14449 	/* determine report luns addressing type */
14450 	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
14451 		/*
14452 		 * Vendors in the field have been found to be concatenating
14453 		 * bus/target/lun to equal the complete lun value instead
14454 		 * of switching to flat space addressing
14455 		 */
14456 		/* 00b - peripheral device addressing method */
14457 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
14458 		/* FALLTHRU */
14459 		/* 10b - logical unit addressing method */
14460 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
14461 		/* FALLTHRU */
14462 		/* 01b - flat space addressing method */
14463 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
14464 		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
14465 		*lun_addr_type = (buf[lun_idx] &
14466 		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
14467 		*lun_num = (buf[lun_idx] & 0x3F) << 8;
14468 		*lun_num |= buf[lun_idx + 1];
14469 		return (DDI_SUCCESS);
14470 	default:
14471 		return (DDI_FAILURE);
14472 	}
14473 }
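/*
 * For example (illustrative bytes, and assuming the report-luns address
 * mask covers the top two bits of byte 0): mptsas_retrieve_lundata()
 * above decodes a descriptor beginning 0x40 0x05 as flat space
 * addressing with lun_num == 5.
 */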
14474 
14475 static int
14476 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
14477 {
14478 	struct buf		*repluns_bp = NULL;
14479 	struct scsi_address	ap;
14480 	uchar_t			cdb[CDB_GROUP5];
14481 	int			ret = DDI_FAILURE;
14482 	int			retry = 0;
14483 	int			lun_list_len = 0;
14484 	uint16_t		lun_num = 0;
14485 	uint8_t			lun_addr_type = 0;
14486 	uint32_t		lun_cnt = 0;
14487 	uint32_t		lun_total = 0;
14488 	dev_info_t		*cdip = NULL;
14489 	uint16_t		*saved_repluns = NULL;
14490 	char			*buffer = NULL;
14491 	int			buf_len = 128;
14492 	mptsas_t		*mpt = DIP2MPT(pdip);
14493 	uint64_t		sas_wwn = 0;
14494 	uint8_t			phy = 0xFF;
14495 	uint32_t		dev_info = 0;
14496 
14497 	mutex_enter(&mpt->m_mutex);
14498 	sas_wwn = ptgt->m_addr.mta_wwn;
14499 	phy = ptgt->m_phynum;
14500 	dev_info = ptgt->m_deviceinfo;
14501 	mutex_exit(&mpt->m_mutex);
14502 
14503 	if (sas_wwn == 0) {
14504 		/*
14505 		 * It's a SATA device without a Device Name,
14506 		 * so don't try multi-LUNs.
14507 		 */
14508 		if (mptsas_find_child_phy(pdip, phy)) {
14509 			return (DDI_SUCCESS);
14510 		} else {
14511 			/*
14512 			 * need to configure and create the node
14513 			 */
14514 			return (DDI_FAILURE);
14515 		}
14516 	}
14517 
14518 	/*
14519 	 * WWN (SAS address or Device Name exist)
14520 	 */
14521 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14522 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14523 		/*
14524 		 * SATA device with a Device Name,
14525 		 * so don't try multi-LUNs.
14526 		 */
14527 		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
14528 			return (DDI_SUCCESS);
14529 		} else {
14530 			return (DDI_FAILURE);
14531 		}
14532 	}
14533 
14534 	do {
14535 		ap.a_target = MPTSAS_INVALID_DEVHDL;
14536 		ap.a_lun = 0;
14537 		ap.a_hba_tran = mpt->m_tran;
14538 		repluns_bp = scsi_alloc_consistent_buf(&ap,
14539 		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
14540 		if (repluns_bp == NULL) {
14541 			retry++;
14542 			continue;
14543 		}
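		/*
		 * Build a 12-byte REPORT LUNS CDB; bytes 6-9 carry the
		 * allocation length in big-endian order.
		 */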
14544 		bzero(cdb, CDB_GROUP5);
14545 		cdb[0] = SCMD_REPORT_LUNS;
14546 		cdb[6] = (buf_len & 0xff000000) >> 24;
14547 		cdb[7] = (buf_len & 0x00ff0000) >> 16;
14548 		cdb[8] = (buf_len & 0x0000ff00) >> 8;
14549 		cdb[9] = (buf_len & 0x000000ff);
14550 
14551 		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
14552 		    repluns_bp, NULL);
14553 		if (ret != DDI_SUCCESS) {
14554 			scsi_free_consistent_buf(repluns_bp);
14555 			retry++;
14556 			continue;
14557 		}
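		/*
		 * The first 4 bytes of the REPORT LUNS data are the LUN
		 * list length; descriptors start at offset 8, so a complete
		 * response needs lun_list_len + 8 bytes.
		 */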
14558 		lun_list_len = BE_32(*(int *)((void *)(
14559 		    repluns_bp->b_un.b_addr)));
14560 		if (buf_len >= lun_list_len + 8) {
14561 			ret = DDI_SUCCESS;
14562 			break;
14563 		}
14564 		scsi_free_consistent_buf(repluns_bp);
14565 		buf_len = lun_list_len + 8;
14566 
14567 	} while (retry < 3);
14568 
14569 	if (ret != DDI_SUCCESS)
14570 		return (ret);
14571 	buffer = (char *)repluns_bp->b_un.b_addr;
14572 	/*
14573 	 * find out the number of luns returned by the SCSI ReportLun call
14574 	 * and allocate buffer space
14575 	 */
14576 	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14577 	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14578 	if (saved_repluns == NULL) {
14579 		scsi_free_consistent_buf(repluns_bp);
14580 		return (DDI_FAILURE);
14581 	}
14582 	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14583 		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14584 		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14585 			continue;
14586 		}
14587 		saved_repluns[lun_cnt] = lun_num;
14588 		if ((cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num)) !=
14589 		    NULL) {
14590 			ret = DDI_SUCCESS;
14591 		} else {
14592 			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14593 			    ptgt);
14594 		}
14595 		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14596 			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14597 			    MPTSAS_DEV_GONE);
14598 		}
14599 	}
14600 	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14601 	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14602 	scsi_free_consistent_buf(repluns_bp);
14603 	return (DDI_SUCCESS);
14604 }
14605 
14606 static int
14607 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14608 {
14609 	int			rval = DDI_FAILURE;
14610 	struct scsi_inquiry	*sd_inq = NULL;
14611 	mptsas_t		*mpt = DIP2MPT(pdip);
14612 	mptsas_target_t		*ptgt = NULL;
14613 
14614 	mutex_enter(&mpt->m_mutex);
14615 	ptgt = refhash_linear_search(mpt->m_targets,
14616 	    mptsas_target_eval_devhdl, &target);
14617 	mutex_exit(&mpt->m_mutex);
14618 	if (ptgt == NULL) {
14619 		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14620 		    "not found.", target);
14621 		return (rval);
14622 	}
14623 
14624 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14625 	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14626 	    SUN_INQSIZE, 0, (uchar_t)0);
14627 
14628 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14629 		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14630 		    0);
14631 	} else {
14632 		rval = DDI_FAILURE;
14633 	}
14634 
14635 	kmem_free(sd_inq, SUN_INQSIZE);
14636 	return (rval);
14637 }
14638 
14639 /*
14640  * configure all RAID volumes for virtual iport
14641  */
14642 static void
14643 mptsas_config_all_viport(dev_info_t *pdip)
14644 {
14645 	mptsas_t	*mpt = DIP2MPT(pdip);
14646 	int		config, vol;
14647 	int		target;
14648 	dev_info_t	*lundip = NULL;
14649 
14650 	/*
14651 	 * Get latest RAID info and search for any Volume DevHandles.  If any
14652 	 * are found, configure the volume.
14653 	 */
14654 	mutex_enter(&mpt->m_mutex);
14655 	for (config = 0; config < mpt->m_num_raid_configs; config++) {
14656 		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14657 			if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14658 			    == 1) {
14659 				target = mpt->m_raidconfig[config].
14660 				    m_raidvol[vol].m_raidhandle;
14661 				mutex_exit(&mpt->m_mutex);
14662 				(void) mptsas_config_raid(pdip, target,
14663 				    &lundip);
14664 				mutex_enter(&mpt->m_mutex);
14665 			}
14666 		}
14667 	}
14668 	mutex_exit(&mpt->m_mutex);
14669 }
14670 
14671 static void
14672 mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
14673     int lun_cnt, mptsas_target_t *ptgt)
14674 {
14675 	dev_info_t	*child = NULL, *savechild = NULL;
14676 	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
14677 	uint64_t	sas_wwn, wwid;
14678 	uint8_t		phy;
14679 	int		lun;
14680 	int		i;
14681 	int		find;
14682 	char		*addr;
14683 	char		*nodename;
14684 	mptsas_t	*mpt = DIP2MPT(pdip);
14685 
14686 	mutex_enter(&mpt->m_mutex);
14687 	wwid = ptgt->m_addr.mta_wwn;
14688 	mutex_exit(&mpt->m_mutex);
14689 
14690 	child = ddi_get_child(pdip);
14691 	while (child) {
14692 		find = 0;
14693 		savechild = child;
14694 		child = ddi_get_next_sibling(child);
14695 
14696 		nodename = ddi_node_name(savechild);
14697 		if (strcmp(nodename, "smp") == 0) {
14698 			continue;
14699 		}
14700 
14701 		addr = ddi_get_name_addr(savechild);
14702 		if (addr == NULL) {
14703 			continue;
14704 		}
14705 
14706 		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
14707 		    DDI_SUCCESS) {
14708 			continue;
14709 		}
14710 
14711 		if (wwid == sas_wwn) {
14712 			for (i = 0; i < lun_cnt; i++) {
14713 				if (repluns[i] == lun) {
14714 					find = 1;
14715 					break;
14716 				}
14717 			}
14718 		} else {
14719 			continue;
14720 		}
14721 		if (find == 0) {
14722 			/*
14723 			 * The LUN is no longer present, so offline it.
14724 			 */
14725 			(void) mptsas_offline_lun(pdip, savechild, NULL,
14726 			    NDI_DEVI_REMOVE);
14727 		}
14728 	}
14729 
14730 	pip = mdi_get_next_client_path(pdip, NULL);
14731 	while (pip) {
14732 		find = 0;
14733 		savepip = pip;
14734 		addr = MDI_PI(pip)->pi_addr;
14735 
14736 		pip = mdi_get_next_client_path(pdip, pip);
14737 
14738 		if (addr == NULL) {
14739 			continue;
14740 		}
14741 
14742 		if (mptsas_parse_address(addr, &sas_wwn, &phy,
14743 		    &lun) != DDI_SUCCESS) {
14744 			continue;
14745 		}
14746 
14747 		if (sas_wwn == wwid) {
14748 			for (i = 0; i < lun_cnt; i++) {
14749 				if (repluns[i] == lun) {
14750 					find = 1;
14751 					break;
14752 				}
14753 			}
14754 		} else {
14755 			continue;
14756 		}
14757 
14758 		if (find == 0) {
14759 			/*
14760 			 * The LUN is no longer present, so offline it.
14761 			 */
14762 			(void) mptsas_offline_lun(pdip, NULL, savepip,
14763 			    NDI_DEVI_REMOVE);
14764 		}
14765 	}
14766 }
14767 
14768 /*
14769  * If this enclosure doesn't exist in the enclosure list, add it. If it does,
14770  * update it.
14771  */
14772 static void
14773 mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
14774 {
14775 	mptsas_enclosure_t *m;
14776 
14777 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
14778 	m = mptsas_enc_lookup(mpt, mep->me_enchdl);
14779 	if (m != NULL) {
14780 		uint8_t *ledp;
14781 		m->me_flags = mep->me_flags;
14782 
14783 
14784 		/*
14785 		 * If the number of slots and the first slot entry in the
14786 		 * enclosure have not changed, then we don't need to do
14787 		 * anything here. Otherwise, we need to allocate a new array
14788 		 * for the LED status of the slots.
14789 		 */
14790 		if (m->me_fslot == mep->me_fslot &&
14791 		    m->me_nslots == mep->me_nslots)
14792 			return;
14793 
14794 		/*
14795 		 * If the number of slots or the first slot has changed, it's
14796 		 * not clear that we're really in a place that we can continue
14797 		 * to honor the existing flags.
14798 		 */
14799 		if (mep->me_nslots > 0) {
14800 			ledp = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
14801 			    KM_SLEEP);
14802 		} else {
14803 			ledp = NULL;
14804 		}
14805 
14806 		if (m->me_slotleds != NULL) {
14807 			kmem_free(m->me_slotleds, sizeof (uint8_t) *
14808 			    m->me_nslots);
14809 		}
14810 		m->me_slotleds = ledp;
14811 		m->me_fslot = mep->me_fslot;
14812 		m->me_nslots = mep->me_nslots;
14813 		return;
14814 	}
14815 
14816 	m = kmem_zalloc(sizeof (*m), KM_SLEEP);
14817 	m->me_enchdl = mep->me_enchdl;
14818 	m->me_flags = mep->me_flags;
14819 	m->me_nslots = mep->me_nslots;
14820 	m->me_fslot = mep->me_fslot;
14821 	if (m->me_nslots > 0) {
14822 		m->me_slotleds = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
14823 		    KM_SLEEP);
14824 		/*
14825 		 * It may make sense to optionally flush all of the slots and/or
14826 		 * read the slot status flag here to synchronize between
14827 		 * ourselves and the card. So far, that hasn't been needed
14828 		 * anecdotally when enumerating something new. If we do, we
14829 		 * should kick that off in a taskq potentially.
14830 		 */
14831 	}
14832 	list_insert_tail(&mpt->m_enclosures, m);
14833 }
14834 
14835 static void
14836 mptsas_update_hashtab(struct mptsas *mpt)
14837 {
14838 	uint32_t	page_address;
14839 	int		rval = 0;
14840 	uint16_t	dev_handle;
14841 	mptsas_target_t	*ptgt = NULL;
14842 	mptsas_smp_t	smp_node;
14843 
14844 	/*
14845 	 * Get latest RAID info.
14846 	 */
14847 	(void) mptsas_get_raid_info(mpt);
14848 
14849 	dev_handle = mpt->m_smp_devhdl;
14850 	while (mpt->m_done_traverse_smp == 0) {
14851 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
14852 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
14853 		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
14854 		    != DDI_SUCCESS) {
14855 			break;
14856 		}
14857 		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
14858 		(void) mptsas_smp_alloc(mpt, &smp_node);
14859 	}
14860 
14861 	/*
14862 	 * Loop over enclosures so we can understand what's there.
14863 	 */
14864 	dev_handle = MPTSAS_INVALID_DEVHDL;
14865 	while (mpt->m_done_traverse_enc == 0) {
14866 		mptsas_enclosure_t me;
14867 
14868 		page_address = (MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE &
14869 		    MPI2_SAS_ENCLOS_PGAD_FORM_MASK) | (uint32_t)dev_handle;
14870 
14871 		if (mptsas_get_enclosure_page0(mpt, page_address, &me) !=
14872 		    DDI_SUCCESS) {
14873 			break;
14874 		}
14875 		dev_handle = me.me_enchdl;
14876 		mptsas_enclosure_update(mpt, &me);
14877 	}
14878 
14879 	/*
14880 	 * Config target devices
14881 	 */
14882 	dev_handle = mpt->m_dev_handle;
14883 
14884 	/*
14885 	 * Loop to get SAS device page 0 by GetNextHandle until the
14886 	 * last handle. If the SAS device is a SATA/SSP target,
14887 	 * we try to config it.
14888 	 */
14889 	while (mpt->m_done_traverse_dev == 0) {
14890 		ptgt = NULL;
14891 		page_address =
14892 		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14893 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14894 		    (uint32_t)dev_handle;
14895 		rval = mptsas_get_target_device_info(mpt, page_address,
14896 		    &dev_handle, &ptgt);
14897 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
14898 		    (rval == DEV_INFO_FAIL_ALLOC)) {
14899 			break;
14900 		}
14901 		if (rval == DEV_INFO_FAIL_GUID) {
14902 			continue;
14903 		}
14904 
14905 		mpt->m_dev_handle = dev_handle;
14906 	}
14907 
14908 }
14909 
14910 void
14911 mptsas_update_driver_data(struct mptsas *mpt)
14912 {
14913 	mptsas_target_t *tp;
14914 	mptsas_smp_t *sp;
14915 
14916 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
14917 
14918 	/*
14919 	 * TODO after hard reset, update the driver data structures
14920 	 * 1. update port/phymask mapping table mpt->m_phy_info
14921 	 * 2. invalidate all the entries in the hash table
14922 	 *    m_devhdl = 0xffff and m_deviceinfo = 0
14923 	 * 3. call sas_device_page/expander_page to update hash table
14924 	 */
14925 	mptsas_update_phymask(mpt);
14926 
14927 	/*
14928 	 * Remove all the devhdls for existing entries but leave their
14929 	 * addresses alone.  In update_hashtab() below, we'll find all
14930 	 * targets that are still present and reassociate them with
14931 	 * their potentially new devhdls.  Leaving the targets around in
14932 	 * this fashion allows them to be used on the tx waitq even
14933 	 * while IOC reset is occurring.
14934 	 */
14935 	for (tp = refhash_first(mpt->m_targets); tp != NULL;
14936 	    tp = refhash_next(mpt->m_targets, tp)) {
14937 		tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14938 		tp->m_deviceinfo = 0;
14939 		tp->m_dr_flag = MPTSAS_DR_INACTIVE;
14940 	}
14941 	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
14942 	    sp = refhash_next(mpt->m_smp_targets, sp)) {
14943 		sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14944 		sp->m_deviceinfo = 0;
14945 	}
14946 	mpt->m_done_traverse_dev = 0;
14947 	mpt->m_done_traverse_smp = 0;
14948 	mpt->m_done_traverse_enc = 0;
14949 	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
14950 	mptsas_update_hashtab(mpt);
14951 }
14952 
14953 static void
14954 mptsas_config_all(dev_info_t *pdip)
14955 {
14956 	dev_info_t	*smpdip = NULL;
14957 	mptsas_t	*mpt = DIP2MPT(pdip);
14958 	int		phymask = 0;
14959 	mptsas_phymask_t phy_mask;
14960 	mptsas_target_t	*ptgt = NULL;
14961 	mptsas_smp_t	*psmp;
14962 
14963 	/*
14964 	 * Get the phymask associated to the iport
14965 	 */
14966 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14967 	    "phymask", 0);
14968 
14969 	/*
14970 	 * Enumerate RAID volumes here (phymask == 0).
14971 	 */
14972 	if (phymask == 0) {
14973 		mptsas_config_all_viport(pdip);
14974 		return;
14975 	}
14976 
14977 	mutex_enter(&mpt->m_mutex);
14978 
14979 	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
14980 	    !mpt->m_done_traverse_enc) {
14981 		mptsas_update_hashtab(mpt);
14982 	}
14983 
14984 	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
14985 	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
14986 		phy_mask = psmp->m_addr.mta_phymask;
14987 		if (phy_mask == phymask) {
14988 			smpdip = NULL;
14989 			mutex_exit(&mpt->m_mutex);
14990 			(void) mptsas_online_smp(pdip, psmp, &smpdip);
14991 			mutex_enter(&mpt->m_mutex);
14992 		}
14993 	}
14994 
14995 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
14996 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
14997 		phy_mask = ptgt->m_addr.mta_phymask;
14998 		if (phy_mask == phymask) {
14999 			mutex_exit(&mpt->m_mutex);
15000 			(void) mptsas_config_target(pdip, ptgt);
15001 			mutex_enter(&mpt->m_mutex);
15002 		}
15003 	}
15004 	mutex_exit(&mpt->m_mutex);
15005 }
15006 
15007 static int
15008 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
15009 {
15010 	int		rval = DDI_FAILURE;
15011 	dev_info_t	*tdip;
15012 
15013 	rval = mptsas_config_luns(pdip, ptgt);
15014 	if (rval != DDI_SUCCESS) {
15015 		/*
15016 		 * The return value means the SCMD_REPORT_LUNS
15017 		 * did not execute successfully. The target may not
15018 		 * support the command.
15019 		 */
15020 		rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
15021 	}
15022 	return (rval);
15023 }
15024 
15025 /*
15026  * Return failure if not all of the children/paths are freed.
15027  * If there is any path under the HBA, the return value will always be
15028  * failure because we don't call mdi_pi_free() for the path.
15029  */
15030 static int
15031 mptsas_offline_target(dev_info_t *pdip, char *name)
15032 {
15033 	dev_info_t		*child = NULL, *prechild = NULL;
15034 	mdi_pathinfo_t		*pip = NULL, *savepip = NULL;
15035 	int			tmp_rval, rval = DDI_SUCCESS;
15036 	char			*addr, *cp;
15037 	size_t			s;
15038 	mptsas_t		*mpt = DIP2MPT(pdip);
15039 
15040 	child = ddi_get_child(pdip);
15041 	while (child) {
15042 		addr = ddi_get_name_addr(child);
15043 		prechild = child;
15044 		child = ddi_get_next_sibling(child);
15045 
15046 		if (addr == NULL) {
15047 			continue;
15048 		}
15049 		if ((cp = strchr(addr, ',')) == NULL) {
15050 			continue;
15051 		}
15052 
15053 		s = (uintptr_t)cp - (uintptr_t)addr;
15054 
15055 		if (strncmp(addr, name, s) != 0) {
15056 			continue;
15057 		}
15058 
15059 		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
15060 		    NDI_DEVI_REMOVE);
15061 		if (tmp_rval != DDI_SUCCESS) {
15062 			rval = DDI_FAILURE;
15063 			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15064 			    prechild, MPTSAS_DEV_GONE) !=
15065 			    DDI_PROP_SUCCESS) {
15066 				mptsas_log(mpt, CE_WARN, "mptsas driver "
15067 				    "unable to create property for "
15068 				    "SAS %s (MPTSAS_DEV_GONE)", addr);
15069 			}
15070 		}
15071 	}
15072 
15073 	pip = mdi_get_next_client_path(pdip, NULL);
15074 	while (pip) {
15075 		addr = MDI_PI(pip)->pi_addr;
15076 		savepip = pip;
15077 		pip = mdi_get_next_client_path(pdip, pip);
15078 		if (addr == NULL) {
15079 			continue;
15080 		}
15081 
15082 		if ((cp = strchr(addr, ',')) == NULL) {
15083 			continue;
15084 		}
15085 
15086 		s = (uintptr_t)cp - (uintptr_t)addr;
15087 
15088 		if (strncmp(addr, name, s) != 0) {
15089 			continue;
15090 		}
15091 
15092 		(void) mptsas_offline_lun(pdip, NULL, savepip,
15093 		    NDI_DEVI_REMOVE);
15094 		/*
15095 		 * The driver does not invoke mdi_pi_free(), so the path
15096 		 * will never be freed; return DDI_FAILURE.
15097 		 */
15098 		rval = DDI_FAILURE;
15099 	}
15100 	return (rval);
15101 }
15102 
15103 static int
15104 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
15105     mdi_pathinfo_t *rpip, uint_t flags)
15106 {
15107 	int		rval = DDI_FAILURE;
15108 	char		*devname;
15109 	dev_info_t	*cdip, *parent;
15110 
15111 	if (rpip != NULL) {
15112 		parent = scsi_vhci_dip;
15113 		cdip = mdi_pi_get_client(rpip);
15114 	} else if (rdip != NULL) {
15115 		parent = pdip;
15116 		cdip = rdip;
15117 	} else {
15118 		return (DDI_FAILURE);
15119 	}
15120 
15121 	/*
15122 	 * Make sure node is attached otherwise
15123 	 * it won't have related cache nodes to
15124 	 * clean up.  i_ddi_devi_attached is
15125 	 * similar to i_ddi_node_state(cdip) >=
15126 	 * DS_ATTACHED.
15127 	 */
15128 	if (i_ddi_devi_attached(cdip)) {
15129 
15130 		/* Get full devname */
15131 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15132 		(void) ddi_deviname(cdip, devname);
15133 		/* Clean cache */
15134 		(void) devfs_clean(parent, devname + 1,
15135 		    DV_CLEAN_FORCE);
15136 		kmem_free(devname, MAXNAMELEN + 1);
15137 	}
15138 	if (rpip != NULL) {
15139 		if (MDI_PI_IS_OFFLINE(rpip)) {
15140 			rval = DDI_SUCCESS;
15141 		} else {
15142 			rval = mdi_pi_offline(rpip, 0);
15143 		}
15144 	} else {
15145 		rval = ndi_devi_offline(cdip, flags);
15146 	}
15147 
15148 	return (rval);
15149 }
15150 
15151 static dev_info_t *
15152 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
15153 {
15154 	dev_info_t	*child = NULL;
15155 	char		*smp_wwn = NULL;
15156 
15157 	child = ddi_get_child(parent);
15158 	while (child) {
15159 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
15160 		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
15161 		    != DDI_SUCCESS) {
15162 			child = ddi_get_next_sibling(child);
15163 			continue;
15164 		}
15165 
15166 		if (strcmp(smp_wwn, str_wwn) == 0) {
15167 			ddi_prop_free(smp_wwn);
15168 			break;
15169 		}
15170 		child = ddi_get_next_sibling(child);
15171 		ddi_prop_free(smp_wwn);
15172 	}
15173 	return (child);
15174 }
15175 
15176 static int
15177 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
15178 {
15179 	int		rval = DDI_FAILURE;
15180 	char		*devname;
15181 	char		wwn_str[MPTSAS_WWN_STRLEN];
15182 	dev_info_t	*cdip;
15183 
15184 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15185 
15186 	cdip = mptsas_find_smp_child(pdip, wwn_str);
15187 
15188 	if (cdip == NULL)
15189 		return (DDI_SUCCESS);
15190 
15191 	/*
15192 	 * Make sure node is attached otherwise
15193 	 * it won't have related cache nodes to
15194 	 * clean up.  i_ddi_devi_attached is
15195 	 * similar to i_ddi_node_state(cdip) >=
15196 	 * DS_ATTACHED.
15197 	 */
15198 	if (i_ddi_devi_attached(cdip)) {
15199 
15200 		/* Get full devname */
15201 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15202 		(void) ddi_deviname(cdip, devname);
15203 		/* Clean cache */
15204 		(void) devfs_clean(pdip, devname + 1,
15205 		    DV_CLEAN_FORCE);
15206 		kmem_free(devname, MAXNAMELEN + 1);
15207 	}
15208 
15209 	rval = ndi_devi_offline(cdip, flags);
15210 
15211 	return (rval);
15212 }
15213 
15214 static dev_info_t *
15215 mptsas_find_child(dev_info_t *pdip, char *name)
15216 {
15217 	dev_info_t	*child = NULL;
15218 	char		*rname = NULL;
15219 	int		rval = DDI_FAILURE;
15220 
15221 	rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15222 
15223 	child = ddi_get_child(pdip);
15224 	while (child) {
15225 		rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15226 		if (rval != DDI_SUCCESS) {
15227 			child = ddi_get_next_sibling(child);
15228 			bzero(rname, SCSI_MAXNAMELEN);
15229 			continue;
15230 		}
15231 
15232 		if (strcmp(rname, name) == 0) {
15233 			break;
15234 		}
15235 		child = ddi_get_next_sibling(child);
15236 		bzero(rname, SCSI_MAXNAMELEN);
15237 	}
15238 
15239 	kmem_free(rname, SCSI_MAXNAMELEN);
15240 
15241 	return (child);
15242 }
15243 
15244 
15245 static dev_info_t *
15246 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
15247 {
15248 	dev_info_t	*child = NULL;
15249 	char		*name = NULL;
15250 	char		*addr = NULL;
15251 
15252 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15253 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
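	/* Build the unit address in "w<16-hex-digit WWN>,<lun>" form. */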
15254 	(void) sprintf(name, "%016"PRIx64, sasaddr);
15255 	(void) sprintf(addr, "w%s,%x", name, lun);
15256 	child = mptsas_find_child(pdip, addr);
15257 	kmem_free(name, SCSI_MAXNAMELEN);
15258 	kmem_free(addr, SCSI_MAXNAMELEN);
15259 	return (child);
15260 }
15261 
15262 static dev_info_t *
15263 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
15264 {
15265 	dev_info_t	*child;
15266 	char		*addr;
15267 
15268 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15269 	(void) sprintf(addr, "p%x,0", phy);
15270 	child = mptsas_find_child(pdip, addr);
15271 	kmem_free(addr, SCSI_MAXNAMELEN);
15272 	return (child);
15273 }
15274 
15275 static mdi_pathinfo_t *
15276 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
15277 {
15278 	mdi_pathinfo_t	*path;
15279 	char		*addr = NULL;
15280 
15281 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15282 	(void) sprintf(addr, "p%x,0", phy);
15283 	path = mdi_pi_find(pdip, NULL, addr);
15284 	kmem_free(addr, SCSI_MAXNAMELEN);
15285 	return (path);
15286 }
15287 
15288 static mdi_pathinfo_t *
15289 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
15290 {
15291 	mdi_pathinfo_t	*path;
15292 	char		*name = NULL;
15293 	char		*addr = NULL;
15294 
15295 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15296 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15297 	(void) sprintf(name, "%016"PRIx64, sasaddr);
15298 	(void) sprintf(addr, "w%s,%x", name, lun);
15299 	path = mdi_pi_find(parent, NULL, addr);
15300 	kmem_free(name, SCSI_MAXNAMELEN);
15301 	kmem_free(addr, SCSI_MAXNAMELEN);
15302 
15303 	return (path);
15304 }
15305 
15306 static int
15307 mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
15308     dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15309 {
15310 	int			i = 0;
15311 	uchar_t			*inq83 = NULL;
15312 	int			inq83_len1 = 0xFF;
15313 	int			inq83_len = 0;
15314 	int			rval = DDI_FAILURE;
15315 	ddi_devid_t		devid;
15316 	char			*guid = NULL;
15317 	int			target = ptgt->m_devhdl;
15318 	mdi_pathinfo_t		*pip = NULL;
15319 	mptsas_t		*mpt = DIP2MPT(pdip);
15320 
15321 	/*
15322 	 * For CD/DVD-ROM, optical, and enclosure services
15323 	 * devices, we won't try to enumerate them under
15324 	 * scsi_vhci, so there is no need to try page 0x83.
15325 	 */
15326 	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
15327 	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
15328 	    sd_inq->inq_dtype == DTYPE_ESI))
15329 		goto create_lun;
15330 
15331 	/*
15332 	 * The LCA returns good SCSI status, but corrupt page 83 data the first
15333 	 * time it is queried. The solution is to keep trying to request page83
15334 	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
15335 	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
15336 	 * give up to get VPD page at this stage and fail the enumeration.
15337 	 */
15338 
15339 	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);
15340 
15341 	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
15342 		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
15343 		    inq83_len1, &inq83_len, 1);
15344 		if (rval != 0) {
15345 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
15346 			    "0x83 for target:%x, lun:%x failed!", target, lun);
15347 			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
15348 				goto create_lun;
15349 			goto out;
15350 		}
15351 		/*
15352 		 * create DEVID from inquiry data
15353 		 */
15354 		if ((rval = ddi_devid_scsi_encode(
15355 		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
15356 		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
15357 		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
15358 			/*
15359 			 * extract GUID from DEVID
15360 			 */
15361 			guid = ddi_devid_to_guid(devid);
15362 
15363 			/*
15364 			 * Do not enable MPXIO if the strlen(guid) is greater
15365 			 * than MPTSAS_MAX_GUID_LEN; this constraint will be
15366 			 * handled by the framework later.
15367 			 */
15368 			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
15369 				ddi_devid_free_guid(guid);
15370 				guid = NULL;
15371 				if (mpt->m_mpxio_enable == TRUE) {
15372 					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
15373 					    "lun:%x doesn't have a valid GUID, "
15374 					    "multipathing for this drive is "
15375 					    "not enabled", target, lun);
15376 				}
15377 			}
15378 
15379 			/*
15380 			 * devid no longer needed
15381 			 */
15382 			ddi_devid_free(devid);
15383 			break;
15384 		} else if (rval == DDI_NOT_WELL_FORMED) {
15385 			/*
15386 			 * A return value of DDI_NOT_WELL_FORMED from
15387 			 * ddi_devid_scsi_encode() means DEVID_RETRY; it is
15388 			 * worth retrying inquiry page 0x83 to get the GUID.
15389 			 */
15390 			NDBG20(("Not well formed devid, retry..."));
15391 			delay(1 * drv_usectohz(1000000));
15392 			continue;
15393 		} else {
15394 			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
15395 			    "path target:%x, lun:%x", target, lun);
15396 			rval = DDI_FAILURE;
15397 			goto create_lun;
15398 		}
15399 	}
15400 
15401 	if (i == mptsas_inq83_retry_timeout) {
15402 		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timed out "
15403 		    "for path target:%x, lun:%x", target, lun);
15404 	}
15405 
15406 	rval = DDI_FAILURE;
15407 
15408 create_lun:
15409 	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
15410 		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
15411 		    ptgt, lun);
15412 	}
15413 	if (rval != DDI_SUCCESS) {
15414 		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
15415 		    ptgt, lun);
15416 
15417 	}
15418 out:
15419 	if (guid != NULL) {
15420 		/*
15421 		 * guid no longer needed
15422 		 */
15423 		ddi_devid_free_guid(guid);
15424 	}
15425 	if (inq83 != NULL)
15426 		kmem_free(inq83, inq83_len1);
15427 	return (rval);
15428 }
15429 
15430 static int
15431 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
15432     dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
15433 {
15434 	int			target;
15435 	char			*nodename = NULL;
15436 	char			**compatible = NULL;
15437 	int			ncompatible	= 0;
15438 	int			mdi_rtn = MDI_FAILURE;
15439 	int			rval = DDI_FAILURE;
15440 	char			*old_guid = NULL;
15441 	mptsas_t		*mpt = DIP2MPT(pdip);
15442 	char			*lun_addr = NULL;
15443 	char			*wwn_str = NULL;
15444 	char			*attached_wwn_str = NULL;
15445 	char			*component = NULL;
15446 	uint8_t			phy = 0xFF;
15447 	uint64_t		sas_wwn;
15448 	int64_t			lun64 = 0;
15449 	uint32_t		devinfo;
15450 	uint16_t		dev_hdl;
15451 	uint16_t		pdev_hdl;
15452 	uint64_t		dev_sas_wwn;
15453 	uint64_t		pdev_sas_wwn;
15454 	uint32_t		pdev_info;
15455 	uint8_t			physport;
15456 	uint8_t			phy_id;
15457 	uint32_t		page_address;
15458 	uint16_t		bay_num, enclosure, io_flags;
15459 	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
15460 	uint32_t		dev_info;
15461 
15462 	mutex_enter(&mpt->m_mutex);
15463 	target = ptgt->m_devhdl;
15464 	sas_wwn = ptgt->m_addr.mta_wwn;
15465 	devinfo = ptgt->m_deviceinfo;
15466 	phy = ptgt->m_phynum;
15467 	mutex_exit(&mpt->m_mutex);
15468 
15469 	if (sas_wwn) {
15470 		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15471 	} else {
15472 		*pip = mptsas_find_path_phy(pdip, phy);
15473 	}
15474 
15475 	if (*pip != NULL) {
15476 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15477 		ASSERT(*lun_dip != NULL);
15478 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15479 		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15480 		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15481 			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15482 				/*
15483 				 * Same path back online again.
15484 				 */
15485 				(void) ddi_prop_free(old_guid);
15486 				if ((!MDI_PI_IS_ONLINE(*pip)) &&
15487 				    (!MDI_PI_IS_STANDBY(*pip)) &&
15488 				    (ptgt->m_tgt_unconfigured == 0)) {
15489 					rval = mdi_pi_online(*pip, 0);
15490 				} else {
15491 					rval = DDI_SUCCESS;
15492 				}
15493 				if (rval != DDI_SUCCESS) {
15494 					mptsas_log(mpt, CE_WARN, "path:target: "
15495 					    "%x, lun:%x online failed!", target,
15496 					    lun);
15497 					*pip = NULL;
15498 					*lun_dip = NULL;
15499 				}
15500 				return (rval);
15501 			} else {
15502 				/*
15503 				 * The GUID of the LUN has changed, perhaps
15504 				 * because another volume was mapped to the
15505 				 * same LUN.
15506 				 */
15507 				mptsas_log(mpt, CE_WARN, "The GUID of the "
15508 				    "target:%x, lun:%x was changed, maybe "
15509 				    "because someone mapped another volume "
15510 				    "to the same LUN", target, lun);
15511 				(void) ddi_prop_free(old_guid);
15512 				if (!MDI_PI_IS_OFFLINE(*pip)) {
15513 					rval = mdi_pi_offline(*pip, 0);
15514 					if (rval != MDI_SUCCESS) {
15515 						mptsas_log(mpt, CE_WARN, "path:"
15516 						    "target:%x, lun:%x offline "
15517 						    "failed!", target, lun);
15518 						*pip = NULL;
15519 						*lun_dip = NULL;
15520 						return (DDI_FAILURE);
15521 					}
15522 				}
15523 				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
15524 					mptsas_log(mpt, CE_WARN, "path:target:"
15525 					    "%x, lun:%x free failed!", target,
15526 					    lun);
15527 					*pip = NULL;
15528 					*lun_dip = NULL;
15529 					return (DDI_FAILURE);
15530 				}
15531 			}
15532 		} else {
15533 			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15534 			    "property for path:target:%x, lun:%x", target, lun);
15535 			*pip = NULL;
15536 			*lun_dip = NULL;
15537 			return (DDI_FAILURE);
15538 		}
15539 	}
15540 	scsi_hba_nodename_compatible_get(inq, NULL,
15541 	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
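	/*
	 * For illustration only: for a direct-access device (inq_dtype 0)
	 * the call above typically yields a node name such as "disk" and a
	 * compatible list ordered from most to least specific, e.g.
	 * "scsiclass,00.v<VID>.p<PID>" down to plain "scsiclass"; the
	 * binding framework then picks the first form a driver claims.
	 */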
15542 
15543 	/*
15544 	 * if nodename can't be determined then print a message and skip it
15545 	 */
15546 	if (nodename == NULL) {
15547 		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
15548 		    "driver for target %d lun %d dtype:0x%02x", target, lun,
15549 		    inq->inq_dtype);
15550 		return (DDI_FAILURE);
15551 	}
15552 
15553 	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15554 	/* The property is needed by MPAPI */
15555 	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15556 
15557 	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15558 	if (guid) {
15559 		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
15560 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15561 	} else {
15562 		(void) sprintf(lun_addr, "p%x,%x", phy, lun);
15563 		(void) sprintf(wwn_str, "p%x", phy);
15564 	}
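	/*
	 * For illustration, the two addressing forms built above look like
	 * (hypothetical values):
	 *
	 *	lun_addr "w5000c500a1b2c3d4,0"	wwn_str "w5000c500a1b2c3d4"
	 *	lun_addr "p3,0"			wwn_str "p3"
	 *
	 * i.e. SAS-address ("w") naming when a WWN is available, and
	 * phy-number ("p") naming for direct-attached devices without one.
	 */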
15565 
15566 	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
15567 	    guid, lun_addr, compatible, ncompatible,
15568 	    0, pip);
15569 	if (mdi_rtn == MDI_SUCCESS) {
15570 
15571 		if (mdi_prop_update_string(*pip, MDI_GUID,
15572 		    guid) != DDI_SUCCESS) {
15573 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15574 			    "create prop for target %d lun %d (MDI_GUID)",
15575 			    target, lun);
15576 			mdi_rtn = MDI_FAILURE;
15577 			goto virt_create_done;
15578 		}
15579 
15580 		if (mdi_prop_update_int(*pip, LUN_PROP,
15581 		    lun) != DDI_SUCCESS) {
15582 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15583 			    "create prop for target %d lun %d (LUN_PROP)",
15584 			    target, lun);
15585 			mdi_rtn = MDI_FAILURE;
15586 			goto virt_create_done;
15587 		}
15588 		lun64 = (int64_t)lun;
15589 		if (mdi_prop_update_int64(*pip, LUN64_PROP,
15590 		    lun64) != DDI_SUCCESS) {
15591 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15592 			    "create prop for target %d (LUN64_PROP)",
15593 			    target);
15594 			mdi_rtn = MDI_FAILURE;
15595 			goto virt_create_done;
15596 		}
15597 		if (mdi_prop_update_string_array(*pip, "compatible",
15598 		    compatible, ncompatible) !=
15599 		    DDI_PROP_SUCCESS) {
15600 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15601 			    "create prop for target %d lun %d (COMPATIBLE)",
15602 			    target, lun);
15603 			mdi_rtn = MDI_FAILURE;
15604 			goto virt_create_done;
15605 		}
15606 		if (sas_wwn && (mdi_prop_update_string(*pip,
15607 		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15608 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15609 			    "create prop for target %d lun %d "
15610 			    "(target-port)", target, lun);
15611 			mdi_rtn = MDI_FAILURE;
15612 			goto virt_create_done;
15613 		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15614 		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15615 			/*
15616 			 * Direct attached SATA device without DeviceName
15617 			 */
15618 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15619 			    "create prop for SAS target %d lun %d "
15620 			    "(sata-phy)", target, lun);
15621 			mdi_rtn = MDI_FAILURE;
15622 			goto virt_create_done;
15623 		}
15624 		mutex_enter(&mpt->m_mutex);
15625 
15626 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15627 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15628 		    (uint32_t)ptgt->m_devhdl;
15629 		rval = mptsas_get_sas_device_page0(mpt, page_address,
15630 		    &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15631 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15632 		if (rval != DDI_SUCCESS) {
15633 			mutex_exit(&mpt->m_mutex);
15634 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15635 			    "parent device for handle %d", page_address);
15636 			mdi_rtn = MDI_FAILURE;
15637 			goto virt_create_done;
15638 		}
15639 
15640 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15641 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15642 		rval = mptsas_get_sas_device_page0(mpt, page_address,
15643 		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15644 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15645 		if (rval != DDI_SUCCESS) {
15646 			mutex_exit(&mpt->m_mutex);
15647 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15648 			    "device info for handle %d", page_address);
15649 			mdi_rtn = MDI_FAILURE;
15650 			goto virt_create_done;
15651 		}
15652 
15653 		mutex_exit(&mpt->m_mutex);
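		/*
		 * To recap the two config-page lookups above: the first
		 * returned page 0 for the target itself (which also yields
		 * its parent handle, pdev_hdl), and the second fetched
		 * page 0 for that parent so its SAS address (pdev_sas_wwn)
		 * can be used below as the child's attached-port:
		 *
		 *	target handle -> page 0 -> pdev_hdl
		 *	pdev_hdl      -> page 0 -> pdev_sas_wwn
		 */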
15654 
15655 		/*
15656 		 * If directly attached to the controller, set attached-port
15657 		 * to the HBA base WWID; otherwise use the parent's SAS WWN.
15658 		 */
15659 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15660 		    != DEVINFO_DIRECT_ATTACHED) {
15661 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15662 			    pdev_sas_wwn);
15663 		} else {
15664 			/*
15665 			 * Update the iport's attached-port to this target's address.
15666 			 */
15667 			if (sas_wwn == 0) {
15668 				(void) sprintf(wwn_str, "p%x", phy);
15669 			} else {
15670 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15671 			}
15672 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
15673 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15674 			    DDI_PROP_SUCCESS) {
15675 				mptsas_log(mpt, CE_WARN,
15676 				    "mptsas unable to create "
15677 				    "property for iport target-port"
15678 				    " %s (sas_wwn)",
15679 				    wwn_str);
15680 				mdi_rtn = MDI_FAILURE;
15681 				goto virt_create_done;
15682 			}
15683 
15684 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15685 			    mpt->un.m_base_wwid);
15686 		}
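		/*
		 * The net effect, as a rough sketch: for an expander-attached
		 * device, attached-port is the parent expander port's WWN;
		 * for a direct-attached device it is the HBA base WWID, and
		 * the iport's own attached-port is pointed back at this
		 * target's address.
		 */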
15687 
15688 		if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
15689 			char	uabuf[SCSI_WWN_BUFLEN];
15690 
15691 			if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
15692 				mptsas_log(mpt, CE_WARN,
15693 				    "mptsas unable to format SATA bridge WWN");
15694 				mdi_rtn = MDI_FAILURE;
15695 				goto virt_create_done;
15696 			}
15697 
15698 			if (mdi_prop_update_string(*pip,
15699 			    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
15700 			    DDI_SUCCESS) {
15701 				mptsas_log(mpt, CE_WARN,
15702 				    "mptsas unable to create SCSI bridge port "
15703 				    "property for SATA device");
15704 				mdi_rtn = MDI_FAILURE;
15705 				goto virt_create_done;
15706 			}
15707 		}
15708 
15709 		if (mdi_prop_update_string(*pip,
15710 		    SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15711 		    DDI_PROP_SUCCESS) {
15712 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15713 			    "property for iport attached-port %s (sas_wwn)",
15714 			    attached_wwn_str);
15715 			mdi_rtn = MDI_FAILURE;
15716 			goto virt_create_done;
15717 		}
15718 
15719 
15720 		if (inq->inq_dtype == 0) {
15721 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15722 			/*
15723 			 * set obp path for pathinfo
15724 			 */
15725 			(void) snprintf(component, MAXPATHLEN,
15726 			    "disk@%s", lun_addr);
15727 
15728 			if (mdi_pi_pathname_obp_set(*pip, component) !=
15729 			    DDI_SUCCESS) {
15730 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15731 				    "unable to set obp-path for object %s",
15732 				    component);
15733 				mdi_rtn = MDI_FAILURE;
15734 				goto virt_create_done;
15735 			}
15736 		}
15737 
15738 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15739 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15740 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15741 			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15742 			    "pm-capable", 1)) !=
15743 			    DDI_PROP_SUCCESS) {
15744 				mptsas_log(mpt, CE_WARN, "mptsas driver "
15745 				    "failed to create pm-capable "
15746 				    "property, target %d", target);
15747 				mdi_rtn = MDI_FAILURE;
15748 				goto virt_create_done;
15749 			}
15750 		}
15751 		/*
15752 		 * Create the phy-num property
15753 		 */
15754 		if (mdi_prop_update_int(*pip, "phy-num",
15755 		    ptgt->m_phynum) != DDI_SUCCESS) {
15756 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15757 			    "create phy-num property for target %d lun %d",
15758 			    target, lun);
15759 			mdi_rtn = MDI_FAILURE;
15760 			goto virt_create_done;
15761 		}
15762 		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15763 		mdi_rtn = mdi_pi_online(*pip, 0);
15764 		if (mdi_rtn == MDI_NOT_SUPPORTED) {
15765 			mdi_rtn = MDI_FAILURE;
15766 		}
15767 virt_create_done:
15768 		if (*pip && mdi_rtn != MDI_SUCCESS) {
15769 			(void) mdi_pi_free(*pip, 0);
15770 			*pip = NULL;
15771 			*lun_dip = NULL;
15772 		}
15773 	}
15774 
15775 	scsi_hba_nodename_compatible_free(nodename, compatible);
15776 	if (lun_addr != NULL) {
15777 		kmem_free(lun_addr, SCSI_MAXNAMELEN);
15778 	}
15779 	if (wwn_str != NULL) {
15780 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15781 	}
15782 	if (component != NULL) {
15783 		kmem_free(component, MAXPATHLEN);
15784 	}
15785 
15786 	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15787 }
15788 
15789 static int
15790 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15791     char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15792 {
15793 	int			target;
15794 	int			rval;
15795 	int			ndi_rtn = NDI_FAILURE;
15796 	uint64_t		be_sas_wwn;
15797 	char			*nodename = NULL;
15798 	char			**compatible = NULL;
15799 	int			ncompatible = 0;
15800 	int			instance = 0;
15801 	mptsas_t		*mpt = DIP2MPT(pdip);
15802 	char			*wwn_str = NULL;
15803 	char			*component = NULL;
15804 	char			*attached_wwn_str = NULL;
15805 	uint8_t			phy = 0xFF;
15806 	uint64_t		sas_wwn;
15807 	uint32_t		devinfo;
15808 	uint16_t		dev_hdl;
15809 	uint16_t		pdev_hdl;
15810 	uint64_t		pdev_sas_wwn;
15811 	uint64_t		dev_sas_wwn;
15812 	uint32_t		pdev_info;
15813 	uint8_t			physport;
15814 	uint8_t			phy_id;
15815 	uint32_t		page_address;
15816 	uint16_t		bay_num, enclosure, io_flags;
15817 	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
15818 	uint32_t		dev_info;
15819 	int64_t			lun64 = 0;
15820 
15821 	mutex_enter(&mpt->m_mutex);
15822 	target = ptgt->m_devhdl;
15823 	sas_wwn = ptgt->m_addr.mta_wwn;
15824 	devinfo = ptgt->m_deviceinfo;
15825 	phy = ptgt->m_phynum;
15826 	mutex_exit(&mpt->m_mutex);
15827 
15828 	/*
15829 	 * generate compatible property with binding-set "mpt"
15830 	 */
15831 	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15832 	    &nodename, &compatible, &ncompatible);
15833 
15834 	/*
15835 	 * if nodename can't be determined then print a message and skip it
15836 	 */
15837 	if (nodename == NULL) {
15838 		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15839 		    "for target %d lun %d", target, lun);
15840 		return (DDI_FAILURE);
15841 	}
15842 
15843 	ndi_rtn = ndi_devi_alloc(pdip, nodename,
15844 	    DEVI_SID_NODEID, lun_dip);
15845 
15846 	/*
15847 	 * if lun alloc success, set props
15848 	 */
15849 	if (ndi_rtn == NDI_SUCCESS) {
15850 
15851 		if (ndi_prop_update_int(DDI_DEV_T_NONE,
15852 		    *lun_dip, LUN_PROP, lun) !=
15853 		    DDI_PROP_SUCCESS) {
15854 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15855 			    "property for target %d lun %d (LUN_PROP)",
15856 			    target, lun);
15857 			ndi_rtn = NDI_FAILURE;
15858 			goto phys_create_done;
15859 		}
15860 
15861 		lun64 = (int64_t)lun;
15862 		if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15863 		    *lun_dip, LUN64_PROP, lun64) !=
15864 		    DDI_PROP_SUCCESS) {
15865 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15866 			    "property for target %d lun64 %d (LUN64_PROP)",
15867 			    target, lun);
15868 			ndi_rtn = NDI_FAILURE;
15869 			goto phys_create_done;
15870 		}
15871 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15872 		    *lun_dip, "compatible", compatible, ncompatible)
15873 		    != DDI_PROP_SUCCESS) {
15874 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15875 			    "property for target %d lun %d (COMPATIBLE)",
15876 			    target, lun);
15877 			ndi_rtn = NDI_FAILURE;
15878 			goto phys_create_done;
15879 		}
15880 
15881 		/*
15882 		 * We need the SAS WWN for non-multipath devices, so
15883 		 * we'll use the same property as that multipathing
15884 		 * devices need to present for MPAPI. If we don't have
15885 		 * a WWN (e.g. parallel SCSI), don't create the prop.
15886 		 */
15887 		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15888 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15889 		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15890 		    *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15891 		    != DDI_PROP_SUCCESS) {
15892 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15893 			    "create property for SAS target %d lun %d "
15894 			    "(target-port)", target, lun);
15895 			ndi_rtn = NDI_FAILURE;
15896 			goto phys_create_done;
15897 		}
15898 
15899 		be_sas_wwn = BE_64(sas_wwn);
15900 		if (sas_wwn && ndi_prop_update_byte_array(
15901 		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15902 		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15903 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15904 			    "create property for SAS target %d lun %d "
15905 			    "(port-wwn)", target, lun);
15906 			ndi_rtn = NDI_FAILURE;
15907 			goto phys_create_done;
15908 		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
15909 		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15910 		    DDI_PROP_SUCCESS)) {
15911 			/*
15912 			 * Direct attached SATA device without DeviceName
15913 			 */
15914 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15915 			    "create property for SAS target %d lun %d "
15916 			    "(sata-phy)", target, lun);
15917 			ndi_rtn = NDI_FAILURE;
15918 			goto phys_create_done;
15919 		}
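		/*
		 * Note: port-wwn is exported as an 8-byte array rather than
		 * a string, and BE_64() above stores it big-endian so the
		 * bytes appear in the order the WWN is written; e.g. a
		 * hypothetical 0x5000c500a1b2c3d4 is stored as the bytes
		 * 50 00 c5 00 a1 b2 c3 d4.
		 */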
15920 
15921 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15922 		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15923 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15924 			    "create property for SAS target %d lun %d"
15925 			    " (SAS_PROP)", target, lun);
15926 			ndi_rtn = NDI_FAILURE;
15927 			goto phys_create_done;
15928 		}
15929 		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15930 		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15931 			mptsas_log(mpt, CE_WARN, "mptsas unable "
15932 			    "to create guid property for target %d "
15933 			    "lun %d", target, lun);
15934 			ndi_rtn = NDI_FAILURE;
15935 			goto phys_create_done;
15936 		}
15937 
15938 		/*
15939 		 * The following code is to set properties for SM-HBA support,
15940 		 * it doesn't apply to RAID volumes
15941 		 */
15942 		if (ptgt->m_addr.mta_phymask == 0)
15943 			goto phys_raid_lun;
15944 
15945 		mutex_enter(&mpt->m_mutex);
15946 
15947 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15948 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15949 		    (uint32_t)ptgt->m_devhdl;
15950 		rval = mptsas_get_sas_device_page0(mpt, page_address,
15951 		    &dev_hdl, &dev_sas_wwn, &dev_info,
15952 		    &physport, &phy_id, &pdev_hdl,
15953 		    &bay_num, &enclosure, &io_flags);
15954 		if (rval != DDI_SUCCESS) {
15955 			mutex_exit(&mpt->m_mutex);
15956 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15957 			    "parent device for handle %d.", page_address);
15958 			ndi_rtn = NDI_FAILURE;
15959 			goto phys_create_done;
15960 		}
15961 
15962 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15963 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15964 		rval = mptsas_get_sas_device_page0(mpt, page_address,
15965 		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15966 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15967 		if (rval != DDI_SUCCESS) {
15968 			mutex_exit(&mpt->m_mutex);
15969 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15970 			    "device info for handle %d.", page_address);
15971 			ndi_rtn = NDI_FAILURE;
15972 			goto phys_create_done;
15973 		}
15974 
15975 		mutex_exit(&mpt->m_mutex);
15976 
15977 		/*
15978 		 * If directly attached to the controller, set attached-port
15979 		 * to the HBA base WWID; otherwise use the parent's SAS WWN.
15980 		 */
15981 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15982 		    != DEVINFO_DIRECT_ATTACHED) {
15983 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15984 			    pdev_sas_wwn);
15985 		} else {
15986 			/*
15987 			 * Update the iport's attached-port to this target's address.
15988 			 */
15989 			if (sas_wwn == 0) {
15990 				(void) sprintf(wwn_str, "p%x", phy);
15991 			} else {
15992 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15993 			}
15994 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
15995 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15996 			    DDI_PROP_SUCCESS) {
15997 				mptsas_log(mpt, CE_WARN,
15998 				    "mptsas unable to create "
15999 				    "property for iport target-port"
16000 				    " %s (sas_wwn)",
16001 				    wwn_str);
16002 				ndi_rtn = NDI_FAILURE;
16003 				goto phys_create_done;
16004 			}
16005 
16006 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
16007 			    mpt->un.m_base_wwid);
16008 		}
16009 
16010 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16011 		    *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
16012 		    DDI_PROP_SUCCESS) {
16013 			mptsas_log(mpt, CE_WARN,
16014 			    "mptsas unable to create "
16015 			    "property for iport attached-port %s (sas_wwn)",
16016 			    attached_wwn_str);
16017 			ndi_rtn = NDI_FAILURE;
16018 			goto phys_create_done;
16019 		}
16020 
16021 		if (IS_SATA_DEVICE(dev_info)) {
16022 			char	uabuf[SCSI_WWN_BUFLEN];
16023 
16024 			if (ndi_prop_update_string(DDI_DEV_T_NONE,
16025 			    *lun_dip, MPTSAS_VARIANT, "sata") !=
16026 			    DDI_PROP_SUCCESS) {
16027 				mptsas_log(mpt, CE_WARN,
16028 				    "mptsas unable to create "
16029 				    "property for device variant ");
16030 				ndi_rtn = NDI_FAILURE;
16031 				goto phys_create_done;
16032 			}
16033 
16034 			if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
16035 				mptsas_log(mpt, CE_WARN,
16036 				    "mptsas unable to format SATA bridge WWN");
16037 				ndi_rtn = NDI_FAILURE;
16038 				goto phys_create_done;
16039 			}
16040 
16041 			if (ndi_prop_update_string(DDI_DEV_T_NONE, *lun_dip,
16042 			    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
16043 			    DDI_PROP_SUCCESS) {
16044 				mptsas_log(mpt, CE_WARN,
16045 				    "mptsas unable to create SCSI bridge port "
16046 				    "property for SATA device");
16047 				ndi_rtn = NDI_FAILURE;
16048 				goto phys_create_done;
16049 			}
16050 		}
16051 
16052 		if (IS_ATAPI_DEVICE(dev_info)) {
16053 			if (ndi_prop_update_string(DDI_DEV_T_NONE,
16054 			    *lun_dip, MPTSAS_VARIANT, "atapi") !=
16055 			    DDI_PROP_SUCCESS) {
16056 				mptsas_log(mpt, CE_WARN,
16057 				    "mptsas unable to create "
16058 				    "property for device variant ");
16059 				ndi_rtn = NDI_FAILURE;
16060 				goto phys_create_done;
16061 			}
16062 		}
16063 
16064 phys_raid_lun:
16065 		/*
16066 		 * If this is a SAS controller and the target is a SATA
16067 		 * drive, set the 'pm-capable' property for sd; on an OPL
16068 		 * platform, also check whether this is an ATAPI
16069 		 * device.
16070 		 */
16071 		instance = ddi_get_instance(mpt->m_dip);
16072 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
16073 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
16074 			NDBG2(("mptsas%d: creating pm-capable property, "
16075 			    "target %d", instance, target));
16076 
16077 			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
16078 			    *lun_dip, "pm-capable", 1)) !=
16079 			    DDI_PROP_SUCCESS) {
16080 				mptsas_log(mpt, CE_WARN, "mptsas "
16081 				    "failed to create pm-capable "
16082 				    "property, target %d", target);
16083 				ndi_rtn = NDI_FAILURE;
16084 				goto phys_create_done;
16085 			}
16086 
16087 		}
16088 
16089 		if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
16090 			/*
16091 			 * add 'obp-path' properties for devinfo
16092 			 */
16093 			bzero(wwn_str, MPTSAS_WWN_STRLEN);
16094 			(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
16095 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
16096 			if (guid) {
16097 				(void) snprintf(component, MAXPATHLEN,
16098 				    "disk@w%s,%x", wwn_str, lun);
16099 			} else {
16100 				(void) snprintf(component, MAXPATHLEN,
16101 				    "disk@p%x,%x", phy, lun);
16102 			}
16103 			if (ddi_pathname_obp_set(*lun_dip, component)
16104 			    != DDI_SUCCESS) {
16105 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
16106 				    "unable to set obp-path for SAS "
16107 				    "object %s", component);
16108 				ndi_rtn = NDI_FAILURE;
16109 				goto phys_create_done;
16110 			}
16111 		}
16112 		/*
16113 		 * Create the phy-num property for non-raid disk
16114 		 */
16115 		if (ptgt->m_addr.mta_phymask != 0) {
16116 			if (ndi_prop_update_int(DDI_DEV_T_NONE,
16117 			    *lun_dip, "phy-num", ptgt->m_phynum) !=
16118 			    DDI_PROP_SUCCESS) {
16119 				mptsas_log(mpt, CE_WARN, "mptsas driver "
16120 				    "failed to create phy-num property for "
16121 				    "target %d", target);
16122 				ndi_rtn = NDI_FAILURE;
16123 				goto phys_create_done;
16124 			}
16125 		}
16126 phys_create_done:
16127 		/*
16128 		 * If props were setup ok, online the lun
16129 		 */
16130 		if (ndi_rtn == NDI_SUCCESS) {
16131 			/*
16132 			 * Try to online the new node
16133 			 */
16134 			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
16135 		}
16136 
16137 		/*
16138 		 * If the online failed, undo the props and free the node.
16139 		 */
16140 		if (ndi_rtn != NDI_SUCCESS) {
16141 			NDBG12(("mptsas driver unable to online "
16142 			    "target %d lun %d", target, lun));
16143 			ndi_prop_remove_all(*lun_dip);
16144 			(void) ndi_devi_free(*lun_dip);
16145 			*lun_dip = NULL;
16146 		}
16147 	}
16148 
16149 	scsi_hba_nodename_compatible_free(nodename, compatible);
16150 
16151 	if (wwn_str != NULL) {
16152 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16153 	}
16154 	if (component != NULL) {
16155 		kmem_free(component, MAXPATHLEN);
16156 	}
16157 
16158 
16159 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16160 }
16161 
16162 static int
16163 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
16164 {
16165 	mptsas_t	*mpt = DIP2MPT(pdip);
16166 	struct smp_device smp_sd;
16167 
16168 	/* XXX An HBA driver should not be allocating an smp_device. */
16169 	bzero(&smp_sd, sizeof (struct smp_device));
16170 	smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16171 	bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16172 
16173 	if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16174 		return (NDI_FAILURE);
16175 	return (NDI_SUCCESS);
16176 }
16177 
16178 static int
16179 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16180 {
16181 	mptsas_t	*mpt = DIP2MPT(pdip);
16182 	mptsas_smp_t	*psmp = NULL;
16183 	int		rval;
16184 	int		phymask;
16185 
16186 	/*
16187 	 * Get the physical port (phymask) associated with the iport.
16188 	 * PHYMASK TODO
16189 	 */
16190 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16191 	    "phymask", 0);
16192 	/*
16193 	 * Find the SMP node in the hash table with the specified SAS
16194 	 * address and physical port.
16195 	 */
16196 	psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16197 	if (psmp == NULL) {
16198 		return (DDI_FAILURE);
16199 	}
16200 
16201 	rval = mptsas_online_smp(pdip, psmp, smp_dip);
16202 
16203 	return (rval);
16204 }
16205 
16206 static int
16207 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
16208     dev_info_t **smp_dip)
16209 {
16210 	char		wwn_str[MPTSAS_WWN_STRLEN];
16211 	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
16212 	int		ndi_rtn = NDI_FAILURE;
16213 	int		rval = 0;
16214 	mptsas_smp_t	dev_info;
16215 	uint32_t	page_address;
16216 	mptsas_t	*mpt = DIP2MPT(pdip);
16217 	uint16_t	dev_hdl;
16218 	uint64_t	sas_wwn;
16219 	uint64_t	smp_sas_wwn;
16220 	uint8_t		physport;
16221 	uint8_t		phy_id;
16222 	uint16_t	pdev_hdl;
16223 	uint8_t		numphys = 0;
16224 	uint16_t	i = 0;
16225 	char		phymask[MPTSAS_MAX_PHYS];
16226 	char		*iport = NULL;
16227 	mptsas_phymask_t	phy_mask = 0;
16228 	uint16_t	attached_devhdl;
16229 	uint16_t	bay_num, enclosure, io_flags;
16230 
16231 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
16232 
16233 	/*
16234 	 * Probe the SMP device to prevent the node of a removed device
16235 	 * from being configured successfully.
16236 	 */
16237 	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
16238 		return (DDI_FAILURE);
16239 	}
16240 
16241 	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
16242 		return (DDI_SUCCESS);
16243 	}
16244 
16245 	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
16246 
16247 	/*
16248 	 * if lun alloc success, set props
16249 	 */
16250 	if (ndi_rtn == NDI_SUCCESS) {
16251 		/*
16252 		 * Set the flavor of the child to be SMP flavored
16253 		 */
16254 		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
16255 
16256 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16257 		    *smp_dip, SMP_WWN, wwn_str) !=
16258 		    DDI_PROP_SUCCESS) {
16259 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16260 			    "property for smp device %s (sas_wwn)",
16261 			    wwn_str);
16262 			ndi_rtn = NDI_FAILURE;
16263 			goto smp_create_done;
16264 		}
16265 		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
16266 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16267 		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
16268 		    DDI_PROP_SUCCESS) {
16269 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16270 			    "property for iport target-port %s (sas_wwn)",
16271 			    wwn_str);
16272 			ndi_rtn = NDI_FAILURE;
16273 			goto smp_create_done;
16274 		}
16275 
16276 		mutex_enter(&mpt->m_mutex);
16277 
16278 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
16279 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
16280 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
16281 		    &dev_info);
16282 		if (rval != DDI_SUCCESS) {
16283 			mutex_exit(&mpt->m_mutex);
16284 			mptsas_log(mpt, CE_WARN,
16285 			    "mptsas unable to get expander "
16286 			    "parent device info for %x", page_address);
16287 			ndi_rtn = NDI_FAILURE;
16288 			goto smp_create_done;
16289 		}
16290 
16291 		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
16292 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16293 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16294 		    (uint32_t)dev_info.m_pdevhdl;
16295 		rval = mptsas_get_sas_device_page0(mpt, page_address,
16296 		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
16297 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
16298 		if (rval != DDI_SUCCESS) {
16299 			mutex_exit(&mpt->m_mutex);
16300 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16301 			    "device info for %x", page_address);
16302 			ndi_rtn = NDI_FAILURE;
16303 			goto smp_create_done;
16304 		}
16305 
16306 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16307 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16308 		    (uint32_t)dev_info.m_devhdl;
16309 		rval = mptsas_get_sas_device_page0(mpt, page_address,
16310 		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
16311 		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
16312 		    &io_flags);
16313 		if (rval != DDI_SUCCESS) {
16314 			mutex_exit(&mpt->m_mutex);
16315 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16316 			    "device info for %x", page_address);
16317 			ndi_rtn = NDI_FAILURE;
16318 			goto smp_create_done;
16319 		}
16320 		mutex_exit(&mpt->m_mutex);
16321 
16322 		/*
16323 		 * If this SMP device is directly attached to the controller,
16324 		 * set attached-port to the HBA base WWID; else the parent's WWN.
16325 		 */
16326 		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16327 		    != DEVINFO_DIRECT_ATTACHED) {
16328 			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
16329 			    sas_wwn);
16330 		} else {
16331 			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
16332 			    mpt->un.m_base_wwid);
16333 		}
16334 
16335 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16336 		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
16337 		    DDI_PROP_SUCCESS) {
16338 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16339 			    "property for smp attached-port %s (sas_wwn)",
16340 			    attached_wwn_str);
16341 			ndi_rtn = NDI_FAILURE;
16342 			goto smp_create_done;
16343 		}
16344 
16345 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
16346 		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
16347 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
16348 			    "create property for SMP %s (SMP_PROP) ",
16349 			    wwn_str);
16350 			ndi_rtn = NDI_FAILURE;
16351 			goto smp_create_done;
16352 		}
16353 
16354 		/*
16355 		 * Check whether the SMP device is directly
16356 		 * attached to the controller.
16357 		 */
16358 		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16359 		    != DEVINFO_DIRECT_ATTACHED) {
16360 			goto smp_create_done;
16361 		}
16362 		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
16363 		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
16364 		if (numphys > 0) {
16365 			goto smp_create_done;
16366 		}
16367 		/*
16368 		 * This iport is an old iport; we need to
16369 		 * reconfigure its properties.
16370 		 */
16371 		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16372 		    MPTSAS_VIRTUAL_PORT, 0) !=
16373 		    DDI_PROP_SUCCESS) {
16374 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16375 			    MPTSAS_VIRTUAL_PORT);
16376 			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
16377 			    "prop update failed");
16378 			goto smp_create_done;
16379 		}
16380 
16381 		mutex_enter(&mpt->m_mutex);
16382 		numphys = 0;
16383 		iport = ddi_get_name_addr(pdip);
16384 		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16385 			bzero(phymask, sizeof (phymask));
16386 			(void) sprintf(phymask,
16387 			    "%x", mpt->m_phy_info[i].phy_mask);
16388 			if (strcmp(phymask, iport) == 0) {
16389 				phy_mask = mpt->m_phy_info[i].phy_mask;
16390 				break;
16391 			}
16392 		}
16393 
16394 		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16395 			if ((phy_mask >> i) & 0x01) {
16396 				numphys++;
16397 			}
16398 		}
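		/*
		 * Illustration: the two loops above recover the phymask from
		 * the iport's name-address and then count its set bits, so
		 * an iport named "f" (phy_mask 0xf) ends up with
		 * numphys == 4, covering phys 0 through 3.
		 */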
16399 		/*
16400 		 * Update PHY info for smhba
16401 		 */
16402 		if (mptsas_smhba_phy_init(mpt)) {
16403 			mutex_exit(&mpt->m_mutex);
16404 			mptsas_log(mpt, CE_WARN, "mptsas phy update "
16405 			    "failed");
16406 			goto smp_create_done;
16407 		}
16408 		mutex_exit(&mpt->m_mutex);
16409 
16410 		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
16411 		    &attached_devhdl);
16412 
16413 		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16414 		    MPTSAS_NUM_PHYS, numphys) !=
16415 		    DDI_PROP_SUCCESS) {
16416 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16417 			    MPTSAS_NUM_PHYS);
16418 			mptsas_log(mpt, CE_WARN, "mptsas update "
16419 			    "num phys props failed");
16420 			goto smp_create_done;
16421 		}
16422 		/*
16423 		 * Add parent's props for SMHBA support
16424 		 */
16425 		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
16426 		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
16427 		    DDI_PROP_SUCCESS) {
16428 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16429 			    SCSI_ADDR_PROP_ATTACHED_PORT);
16430 			mptsas_log(mpt, CE_WARN, "mptsas update iport "
16431 			    "attached-port failed");
16432 			goto smp_create_done;
16433 		}
16434 
16435 smp_create_done:
16436 		/*
16437 		 * If props were setup ok, online the lun
16438 		 */
16439 		if (ndi_rtn == NDI_SUCCESS) {
16440 			/*
16441 			 * Try to online the new node
16442 			 */
16443 			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
16444 		}
16445 
16446 		/*
16447 		 * If the online failed, undo the props and free the node.
16448 		 */
16449 		if (ndi_rtn != NDI_SUCCESS) {
16450 			NDBG12(("mptsas unable to online "
16451 			    "SMP target %s", wwn_str));
16452 			ndi_prop_remove_all(*smp_dip);
16453 			(void) ndi_devi_free(*smp_dip);
16454 		}
16455 	}
16456 
16457 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16458 }
16459 
16460 /* smp transport routine */
16461 static int mptsas_smp_start(struct smp_pkt *smp_pkt)
16462 {
16463 	uint64_t			wwn;
16464 	Mpi2SmpPassthroughRequest_t	req;
16465 	Mpi2SmpPassthroughReply_t	rep;
16466 	uint32_t			direction = 0;
16467 	mptsas_t			*mpt;
16468 	int				ret;
16469 	uint64_t			tmp64;
16470 
16471 	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
16472 	    smp_a_hba_tran->smp_tran_hba_private;
16473 
16474 	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
16475 	/*
16476 	 * Need to compose a SMP request message
16477 	 * and call mptsas_do_passthru() function
16478 	 */
16479 	bzero(&req, sizeof (req));
16480 	bzero(&rep, sizeof (rep));
16481 	req.PassthroughFlags = 0;
16482 	req.PhysicalPort = 0xff;
16483 	req.ChainOffset = 0;
16484 	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
16485 
16486 	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
16487 		smp_pkt->smp_pkt_reason = ERANGE;
16488 		return (DDI_FAILURE);
16489 	}
16490 	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
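	/*
	 * The -4 adjustments here and in the passthrough call below assume
	 * the SMP frame sizes handed to us include the 4-byte CRC, which the
	 * IOC generates and checks itself, so the lengths programmed into
	 * the request must exclude it.
	 */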
16491 
16492 	req.MsgFlags = 0;
16493 	tmp64 = LE_64(wwn);
16494 	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
16495 	if (smp_pkt->smp_pkt_rspsize > 0) {
16496 		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
16497 	}
16498 	if (smp_pkt->smp_pkt_reqsize > 0) {
16499 		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
16500 	}
16501 
16502 	mutex_enter(&mpt->m_mutex);
16503 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
16504 	    (uint8_t *)smp_pkt->smp_pkt_rsp,
16505 	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
16506 	    smp_pkt->smp_pkt_rspsize - 4, direction,
16507 	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
16508 	    smp_pkt->smp_pkt_timeout, FKIOCTL);
16509 	mutex_exit(&mpt->m_mutex);
16510 	if (ret != 0) {
16511 		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
16512 		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
16513 		return (DDI_FAILURE);
16514 	}
16515 	/* Passthrough succeeded; check the SMP status. */
16516 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16517 		switch (LE_16(rep.IOCStatus)) {
16518 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
16519 			smp_pkt->smp_pkt_reason = ENODEV;
16520 			break;
16521 		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
16522 			smp_pkt->smp_pkt_reason = EOVERFLOW;
16523 			break;
16524 		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
16525 			smp_pkt->smp_pkt_reason = EIO;
16526 			break;
16527 		default:
16528 			mptsas_log(mpt, CE_NOTE, "smp_start: got unknown ioc "
16529 			    "status:%x", LE_16(rep.IOCStatus));
16530 			smp_pkt->smp_pkt_reason = EIO;
16531 			break;
16532 		}
16533 		return (DDI_FAILURE);
16534 	}
16535 	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
16536 		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
16537 		    rep.SASStatus);
16538 		smp_pkt->smp_pkt_reason = EIO;
16539 		return (DDI_FAILURE);
16540 	}
16541 
16542 	return (DDI_SUCCESS);
16543 }
16544 
16545 /*
16546  * If we don't find a cached match, walk SAS device page 0 for each device
16547  * until we get a match; return NULL on failure.
16548  */
16549 static mptsas_target_t *
16550 mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
16551 {
16552 	int		i, j = 0;
16553 	int		rval = 0;
16554 	uint16_t	cur_handle;
16555 	uint32_t	page_address;
16556 	mptsas_target_t	*ptgt = NULL;
16557 
16558 	/*
16559 	 * A PHY-named device must be directly attached on a narrow
16560 	 * port; if the iport's phymask covers more than one phy, it
16561 	 * cannot be the parent of the device we are looking for.
16562 	 */
16563 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16564 		if ((1 << i) & phymask)
16565 			j++;
16566 	}
16567 
16568 	if (j > 1)
16569 		return (NULL);
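	/*
	 * Example of the check above: a phymask of 0x8 (only phy 3) is a
	 * narrow port and may host a PHY-addressed target, while 0xc
	 * (phys 2 and 3) is a wide port and is rejected here.
	 */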
16570 
16571 	/*
16572 	 * This must be a narrow port with a single device attached, so
16573 	 * the device whose physical port number matches the iport's port
16574 	 * number is the device we are looking for.
16575 	 */
16576 
16577 	if (mpt->m_phy_info[phy].phy_mask != phymask)
16578 		return (NULL);
16579 
16580 	mutex_enter(&mpt->m_mutex);
16581 
16582 	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
16583 	    &phy);
16584 	if (ptgt != NULL) {
16585 		mutex_exit(&mpt->m_mutex);
16586 		return (ptgt);
16587 	}
16588 
16589 	if (mpt->m_done_traverse_dev) {
16590 		mutex_exit(&mpt->m_mutex);
16591 		return (NULL);
16592 	}
16593 
16594 	/* No cached match; scan the device pages. */
16595 	cur_handle = mpt->m_dev_handle;
16596 	for (; ; ) {
16597 		ptgt = NULL;
16598 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16599 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16600 		rval = mptsas_get_target_device_info(mpt, page_address,
16601 		    &cur_handle, &ptgt);
16602 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
16603 		    (rval == DEV_INFO_FAIL_ALLOC)) {
16604 			break;
16605 		}
16606 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16607 		    (rval == DEV_INFO_PHYS_DISK) ||
16608 		    (rval == DEV_INFO_FAIL_GUID)) {
16609 			continue;
16610 		}
16611 		mpt->m_dev_handle = cur_handle;
16612 
16613 		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
16614 			break;
16615 		}
16616 	}
16617 
16618 	mutex_exit(&mpt->m_mutex);
16619 	return (ptgt);
16620 }
16621 
16622 /*
16623  * ptgt->m_addr.mta_wwn contains the wwid for each disk.
16624  * For RAID volumes, we need to check m_raidvol[x].m_raidwwid instead.
16625  * If we don't find a cached match, walk SAS device page 0 for each
16626  * device until we get a match.
16627  * Return NULL on failure.
16628  */
16629 static mptsas_target_t *
16630 mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16631 {
16632 	int		rval = 0;
16633 	uint16_t	cur_handle;
16634 	uint32_t	page_address;
16635 	mptsas_target_t	*tmp_tgt = NULL;
16636 	mptsas_target_addr_t addr;
16637 
16638 	addr.mta_wwn = wwid;
16639 	addr.mta_phymask = phymask;
16640 	mutex_enter(&mpt->m_mutex);
16641 	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16642 	if (tmp_tgt != NULL) {
16643 		mutex_exit(&mpt->m_mutex);
16644 		return (tmp_tgt);
16645 	}
16646 
16647 	if (phymask == 0) {
16648 		/*
16649 		 * It's IR volume
16650 		 */
16651 		rval = mptsas_get_raid_info(mpt);
16652 		if (rval) {
16653 			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16654 		}
16655 		mutex_exit(&mpt->m_mutex);
16656 		return (tmp_tgt);
16657 	}
16658 
16659 	if (mpt->m_done_traverse_dev) {
16660 		mutex_exit(&mpt->m_mutex);
16661 		return (NULL);
16662 	}
16663 
16664 	/* No cached match; scan the device pages. */
16665 	cur_handle = mpt->m_dev_handle;
16666 	for (;;) {
16667 		tmp_tgt = NULL;
16668 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16669 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
16670 		rval = mptsas_get_target_device_info(mpt, page_address,
16671 		    &cur_handle, &tmp_tgt);
16672 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
16673 		    (rval == DEV_INFO_FAIL_ALLOC)) {
16674 			tmp_tgt = NULL;
16675 			break;
16676 		}
16677 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16678 		    (rval == DEV_INFO_PHYS_DISK) ||
16679 		    (rval == DEV_INFO_FAIL_GUID)) {
16680 			continue;
16681 		}
16682 		mpt->m_dev_handle = cur_handle;
16683 		if ((tmp_tgt->m_addr.mta_wwn) &&
16684 		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
16685 		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
16686 			break;
16687 		}
16688 	}
16689 
16690 	mutex_exit(&mpt->m_mutex);
16691 	return (tmp_tgt);
16692 }
16693 
16694 static mptsas_smp_t *
16695 mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16696 {
16697 	int		rval = 0;
16698 	uint16_t	cur_handle;
16699 	uint32_t	page_address;
16700 	mptsas_smp_t	smp_node, *psmp = NULL;
16701 	mptsas_target_addr_t addr;
16702 
16703 	addr.mta_wwn = wwid;
16704 	addr.mta_phymask = phymask;
16705 	mutex_enter(&mpt->m_mutex);
16706 	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
16707 	if (psmp != NULL) {
16708 		mutex_exit(&mpt->m_mutex);
16709 		return (psmp);
16710 	}
16711 
16712 	if (mpt->m_done_traverse_smp) {
16713 		mutex_exit(&mpt->m_mutex);
16714 		return (NULL);
16715 	}
16716 
16717 	/* No cached match; scan the expander pages. */
16718 	cur_handle = mpt->m_smp_devhdl;
16719 	for (;;) {
16720 		psmp = NULL;
16721 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
16722 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16723 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
16724 		    &smp_node);
16725 		if (rval != DDI_SUCCESS) {
16726 			break;
16727 		}
16728 		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
16729 		psmp = mptsas_smp_alloc(mpt, &smp_node);
16730 		ASSERT(psmp);
16731 		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
16732 		    (psmp->m_addr.mta_phymask == phymask)) {
16733 			break;
16734 		}
16735 	}
16736 
16737 	mutex_exit(&mpt->m_mutex);
16738 	return (psmp);
16739 }
16740 
16741 mptsas_target_t *
16742 mptsas_tgt_alloc(refhash_t *refhash, uint16_t devhdl, uint64_t wwid,
16743     uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16744 {
16745 	mptsas_target_t *tmp_tgt = NULL;
16746 	mptsas_target_addr_t addr;
16747 
16748 	addr.mta_wwn = wwid;
16749 	addr.mta_phymask = phymask;
16750 	tmp_tgt = refhash_lookup(refhash, &addr);
16751 	if (tmp_tgt != NULL) {
16752 		NDBG20(("Hash item already exists"));
16753 		tmp_tgt->m_deviceinfo = devinfo;
16754 		tmp_tgt->m_devhdl = devhdl;	/* XXX - duplicate? */
16755 		return (tmp_tgt);
16756 	}
16757 	tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16758 	if (tmp_tgt == NULL) {
16759 		cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16760 		return (NULL);
16761 	}
16762 	tmp_tgt->m_devhdl = devhdl;
16763 	tmp_tgt->m_addr.mta_wwn = wwid;
16764 	tmp_tgt->m_deviceinfo = devinfo;
16765 	tmp_tgt->m_addr.mta_phymask = phymask;
16766 	tmp_tgt->m_phynum = phynum;
16767 	/* Initialize the tgt structure */
16768 	tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16769 	tmp_tgt->m_qfull_retry_interval =
16770 	    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16771 	tmp_tgt->m_t_throttle = MAX_THROTTLE;
16772 	TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16773 
16774 	refhash_insert(refhash, tmp_tgt);
16775 
16776 	return (tmp_tgt);
16777 }
16778 
16779 static void
16780 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16781 {
16782 	dst->m_devhdl = src->m_devhdl;
16783 	dst->m_deviceinfo = src->m_deviceinfo;
16784 	dst->m_pdevhdl = src->m_pdevhdl;
16785 	dst->m_pdevinfo = src->m_pdevinfo;
16786 }
16787 
16788 static mptsas_smp_t *
16789 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16790 {
16791 	mptsas_target_addr_t addr;
16792 	mptsas_smp_t *ret_data;
16793 
16794 	addr.mta_wwn = data->m_addr.mta_wwn;
16795 	addr.mta_phymask = data->m_addr.mta_phymask;
16796 	ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16797 	/*
16798 	 * If there's already a matching SMP target, update its fields
16799 	 * in place.  Since the address is not changing, it's safe to do
16800 	 * this.  We cannot just bcopy() here because the structure we've
16801 	 * been given has invalid hash links.
16802 	 */
16803 	if (ret_data != NULL) {
16804 		mptsas_smp_target_copy(data, ret_data);
16805 		return (ret_data);
16806 	}
16807 
16808 	ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16809 	bcopy(data, ret_data, sizeof (mptsas_smp_t));
16810 	refhash_insert(mpt->m_smp_targets, ret_data);
16811 	return (ret_data);
16812 }
16813 
16814 /*
16815  * Functions for SGPIO LED support
16816  */
16817 static dev_info_t *
16818 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16819 {
16820 	dev_info_t	*dip;
16821 	int		prop;
16822 	dip = e_ddi_hold_devi_by_dev(dev, 0);
16823 	if (dip == NULL)
16824 		return (dip);
16825 	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16826 	    "phymask", 0);
16827 	*phymask = (mptsas_phymask_t)prop;
16828 	ddi_release_devi(dip);
16829 	return (dip);
16830 }
16831 static mptsas_target_t *
16832 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16833 {
16834 	uint8_t			phynum;
16835 	uint64_t		wwn;
16836 	int			lun;
16837 	mptsas_target_t		*ptgt = NULL;
16838 
16839 	if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16840 		return (NULL);
16841 	}
16842 	if (addr[0] == 'w') {
16843 		ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16844 	} else {
16845 		ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16846 	}
16847 	return (ptgt);
16848 }
16849 
16850 static int
16851 mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx)
16852 {
16853 	uint32_t slotstatus = 0;
16854 
16855 	ASSERT3U(idx, <, mep->me_nslots);
16856 
16857 	/* Build an MPI2 Slot Status based on our view of the world */
16858 	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16859 		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16860 	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16861 		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16862 	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16863 		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
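	/*
	 * The mapping above assumes the MPTSAS_LEDCTL_LED_* identifiers are
	 * 1-based, so bit (id - 1) of me_slotleds tracks each LED; e.g. a
	 * slot with both the ident and fail LEDs lit carries 0x3 here and is
	 * translated into IDENTIFY_REQUEST | PREDICTED_FAULT.
	 */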
16864 
16865 	/* Write it to the controller */
16866 	NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16867 	    slotstatus, idx + mep->me_fslot));
16868 	return (mptsas_send_sep(mpt, mep, idx, &slotstatus,
16869 	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16870 }
16871 
16872 /*
16873  *  send sep request, use enclosure/slot addressing
16874  */
16875 static int
16876 mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
16877     uint32_t *status, uint8_t act)
16878 {
16879 	Mpi2SepRequest_t	req;
16880 	Mpi2SepReply_t		rep;
16881 	int			ret;
16882 	uint16_t		enctype;
16883 	uint16_t		slot;
16884 
16885 	ASSERT(mutex_owned(&mpt->m_mutex));
16886 
16887 	/*
16888 	 * Make sure this enclosure is one whose slot status the IOC manages
16889 	 * directly (SES, SGPIO, or GPIO via the IOC). If it isn't, there is
16890 	 * nothing we can send, so don't issue the request.
16891 	 */
16892 	enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
16893 	if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
16894 	    enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
16895 	    enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
16896 		return (ENOTTY);
16897 	}
16898 	slot = idx + mep->me_fslot;
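	/*
	 * idx is the 0-based index into our per-enclosure LED state, while
	 * me_fslot is the enclosure's first slot number, so e.g. idx 4 in
	 * an enclosure whose slots start at 1 addresses physical slot 5.
	 */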
16899 
16900 	bzero(&req, sizeof (req));
16901 	bzero(&rep, sizeof (rep));
16902 
16903 	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16904 	req.Action = act;
16905 	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16906 	req.EnclosureHandle = LE_16(mep->me_enchdl);
16907 	req.Slot = LE_16(slot);
16908 	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16909 		req.SlotStatus = LE_32(*status);
16910 	}
16911 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16912 	    sizeof (req), sizeof (rep), 0, MPTSAS_PASS_THRU_DIRECTION_NONE,
16913 	    NULL, 0, 60, FKIOCTL);
16914 	if (ret != 0) {
16915 		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16916 		    "Processor Request message error %d", ret);
16917 		return (ret);
16918 	}
16919 	/* Passthrough succeeded; check the IOC status. */
16920 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16921 		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16922 		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
16923 		    LE_32(rep.IOCLogInfo));
16924 		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
16925 		case MPI2_IOCSTATUS_INVALID_FUNCTION:
16926 		case MPI2_IOCSTATUS_INVALID_VPID:
16927 		case MPI2_IOCSTATUS_INVALID_FIELD:
16928 		case MPI2_IOCSTATUS_INVALID_STATE:
16929 		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
16930 		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
16931 		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
16932 		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
16933 		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
16934 		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
16935 			return (EINVAL);
16936 		case MPI2_IOCSTATUS_BUSY:
16937 			return (EBUSY);
16938 		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
16939 			return (EAGAIN);
16940 		case MPI2_IOCSTATUS_INVALID_SGL:
16941 		case MPI2_IOCSTATUS_INTERNAL_ERROR:
16942 		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
16943 		default:
16944 			return (EIO);
16945 		}
16946 	}
16947 	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16948 		*status = LE_32(rep.SlotStatus);
16949 	}
16950 
16951 	return (0);
16952 }
16953 
16954 int
16955 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16956     ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16957     uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16958 {
16959 	ddi_dma_cookie_t	new_cookie;
16960 	size_t			alloc_len;
16961 	uint_t			ncookie;
16962 
16963 	if (cookiep == NULL)
16964 		cookiep = &new_cookie;
16965 
16966 	if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16967 	    NULL, dma_hdp) != DDI_SUCCESS) {
16968 		return (FALSE);
16969 	}
16970 
16971 	if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16972 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16973 	    acc_hdp) != DDI_SUCCESS) {
16974 		ddi_dma_free_handle(dma_hdp);
16975 		*dma_hdp = NULL;
16976 		return (FALSE);
16977 	}
16978 
16979 	if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16980 	    (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16981 	    cookiep, &ncookie) != DDI_DMA_MAPPED) {
16982 		(void) ddi_dma_mem_free(acc_hdp);
16983 		ddi_dma_free_handle(dma_hdp);
16984 		*dma_hdp = NULL;
16985 		return (FALSE);
16986 	}
16987 
16988 	return (TRUE);
16989 }
16990 
16991 void
16992 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16993 {
16994 	if (*dma_hdp == NULL)
16995 		return;
16996 
16997 	(void) ddi_dma_unbind_handle(*dma_hdp);
16998 	(void) ddi_dma_mem_free(acc_hdp);
16999 	ddi_dma_free_handle(dma_hdp);
17000 	*dma_hdp = NULL;
17001 }
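
/*
 * A minimal usage sketch for the pair above (hypothetical caller and DMA
 * attribute; error handling trimmed):
 *
 *	ddi_dma_handle_t	dma_hdl;
 *	ddi_acc_handle_t	acc_hdl;
 *	caddr_t			memp;
 *	ddi_dma_cookie_t	cookie;
 *
 *	if (mptsas_dma_addr_create(mpt, mpt->m_msg_dma_attr, &dma_hdl,
 *	    &acc_hdl, &memp, size, &cookie) == FALSE)
 *		return (DDI_FAILURE);
 *	... use memp for CPU access, cookie.dmac_laddress for the HBA ...
 *	mptsas_dma_addr_destroy(&dma_hdl, &acc_hdl);
 */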
17002 
17003 /*
17004  * DDI UFM Callbacks
17005  */
17006 static int
17007 mptsas_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
17008     ddi_ufm_image_t *img)
17009 {
17010 	if (imgno != 0)
17011 		return (EINVAL);
17012 
17013 	ddi_ufm_image_set_desc(img, "IOC Firmware");
17014 	ddi_ufm_image_set_nslots(img, 1);
17015 
17016 	return (0);
17017 }
17018 
17019 static int
17020 mptsas_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
17021     uint_t slotno, ddi_ufm_slot_t *slot)
17022 {
17023 	mptsas_t *mpt = (mptsas_t *)arg;
17024 	char *buf;
17025 
17026 	if (imgno != 0 || slotno != 0 ||
17027 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, mpt->m_dip,
17028 	    DDI_PROP_DONTPASS, "firmware-version", &buf) != DDI_PROP_SUCCESS)
17029 		return (EINVAL);
17030 
17031 	ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
17032 	ddi_ufm_slot_set_version(slot, buf);
17033 
17034 	ddi_prop_free(buf);
17035 
17036 	return (0);
17037 }
17038 
17039 static int
17040 mptsas_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
17041 {
17042 	*caps = DDI_UFM_CAP_REPORT;
17043 
17044 	return (0);
17045 }
17046