/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Racktop Systems, Inc.
 */

/*
 * This file implements the basic HBA interface to SCSAv3.
 *
 * For target initialization, we'll look up the driver target state by the
 * device address and set it as HBA private in the struct scsi_device.
 *
 * The tran_reset(9e) and tran_abort(9e) entry points are implemented by a
 * common function that sends the appropriate task management request to the
 * target, iff the target supports task management requests. There is no support
 * for bus resets. The case of RESET_ALL is special: sd(4d) issues a RESET_ALL
 * in sddump() and errors out if that fails, so even if task management is
 * unsupported by a target or the reset fails for any other reason, we return
 * success. Any I/O errors due to an unsuccessful reset will be caught later.
 *
 * The tran_start(9e) code paths are almost identical for physical and logical
 * devices, the major difference being that PDs will have the DevHandle in the
 * MPT I/O frame set to the invalid DevHandle (0xffff), while LDs will use the
 * target ID. Also, special settings are applied for LDs and PDs in the RAID
 * context (VendorRegion of the MPT I/O frame). Fastpath I/O is only used for
 * RW commands to JBOD PDs on controllers that support JBOD sequence numbers;
 * there is no fastpath support for LDs.
 *
 * In tran_setup_pkt(9e), an MPT command is allocated for the scsi_pkt, and its
 * members are initialized as follows:
 * - pkt_cdbp will point to the CDB structure embedded in the MPT I/O frame
 * - pkt_scbp will point to the struct scsi_arq_status in the sense DMA memory
 *   allocated for the MPT command
 * - pkt_scblen will be set to the size of the sense DMA memory, minus alignment
 * - SenseBufferLowAddress and SenseBufferLength in the MPT I/O frame will be
 *   set to the sense DMA address and length, respectively, adjusted to account
 *   for the space needed for the ARQ pkt and alignment.
 * - There is no SenseBufferHighAddress.
 * - rc_timeout is set to pkt_time, but it is unknown if that has any effect
 */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>

#include "lmrc.h"
#include "lmrc_reg.h"

static int lmrc_getcap(struct scsi_address *, char *, int);
static int lmrc_setcap(struct scsi_address *, char *, int, int);

static int lmrc_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void lmrc_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);

static int lmrc_tran_abort(struct scsi_address *, struct scsi_pkt *);
static int lmrc_tran_reset(struct scsi_address *, int);

static int lmrc_tran_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void lmrc_tran_teardown_pkt(struct scsi_pkt *);

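/*
 * Tunable: if enabled (the default), relaxed ordering is allowed for data
 * DMA, which may improve performance on some platforms.
 */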
boolean_t lmrc_relaxed_ordering = B_TRUE;

static int
lmrc_getcap(struct scsi_address *sa, char *cap, int whom)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int index;

	VERIFY(lmrc != NULL);

	if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE)
		return (-1);

	switch (index) {
	case SCSI_CAP_CDB_LEN:
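		/*
		 * The maximum CDB length equals the size of the 32-byte CDB
		 * field embedded in the MPT I/O frame.
		 */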
		return (sizeof (((Mpi25SCSIIORequest_t *)NULL)->CDB.CDB32));

	case SCSI_CAP_DMA_MAX:
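		/* Capabilities are returned as int, so clamp to INT_MAX. */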
		if (lmrc->l_dma_attr.dma_attr_maxxfer > INT_MAX)
			return (INT_MAX);
		return (lmrc->l_dma_attr.dma_attr_maxxfer);

	case SCSI_CAP_SECTOR_SIZE:
		if (lmrc->l_dma_attr.dma_attr_granular > INT_MAX)
			return (INT_MAX);
		return (lmrc->l_dma_attr.dma_attr_granular);

	case SCSI_CAP_INTERCONNECT_TYPE: {
		uint8_t interconnect_type;

		rw_enter(&tgt->tgt_lock, RW_READER);
		interconnect_type = tgt->tgt_interconnect_type;
		rw_exit(&tgt->tgt_lock);
		return (interconnect_type);
	}
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		return (1);

	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_INITIATOR_ID:
		return (0);

	default:
		return (-1);
	}
}

static int
lmrc_setcap(struct scsi_address *sa, char *cap, int value, int whom)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int index;

	VERIFY(lmrc != NULL);

	if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE)
		return (-1);

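	/* Setting capabilities for all targets at once is not supported. */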
	if (whom == 0)
		return (-1);

	switch (index) {
	case SCSI_CAP_DMA_MAX:
		if (value <= lmrc->l_dma_attr.dma_attr_maxxfer)
			return (1);
		else
			return (0);

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		if (value == 1)
			return (1);
		else
			return (0);

	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_INITIATOR_ID:
		if (value == 0)
			return (1);
		else
			return (0);

	case SCSI_CAP_SECTOR_SIZE:
	case SCSI_CAP_TOTAL_SECTORS:
		return (0);

	default:
		return (-1);
	}
}

/*
 * lmrc_tran_tgt_init
 *
 * Find the driver target state and link it with the scsi_device.
 */
static int
lmrc_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	lmrc_t *lmrc = hba_tran->tran_hba_private;
	lmrc_tgt_t *tgt;

	VERIFY(lmrc != NULL);

	tgt = lmrc_tgt_find(lmrc, sd);
	if (tgt == NULL)
		return (DDI_FAILURE);

	/* lmrc_tgt_find() returns the target read-locked. */
	scsi_device_hba_private_set(sd, tgt);
	rw_exit(&tgt->tgt_lock);

	return (DDI_SUCCESS);
}

static void
lmrc_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	scsi_device_hba_private_set(sd, NULL);
}

/*
 * lmrc_tran_start
 *
 * Start I/O of a scsi_pkt. Set up the MPT frame, the RAID context, and, if
 * necessary, the SGL for the transfer. Wait for a reply if this is polled
 * I/O.
 *
 * There are subtle differences in the way I/O is done for LDs and PDs.
 *
 * Fastpath I/O is only used for RW commands to JBOD PDs on controllers that
 * support JBOD sequence numbers; there is no fastpath support for LDs.
 */
static int
lmrc_tran_start(struct scsi_address *sa, struct scsi_pkt *pkt)
{
	Mpi25SCSIIORequest_t *io_req;
	lmrc_atomic_req_desc_t req_desc;
	lmrc_raidctx_g35_t *rc;
	struct scsi_device *sd;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	lmrc_tgt_t *tgt;
	lmrc_t *lmrc;
	uint8_t req_flags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	boolean_t intr = (pkt->pkt_flags & FLAG_NOINTR) == 0;
	int ret = TRAN_BADPKT;

	/*
	 * FLAG_NOINTR was set but we're not panicked. This may theoretically
	 * happen if scsi_transport() is called from an interrupt thread, and
	 * we don't support this.
	 */
	if (!intr && !ddi_in_panic())
		return (ret);

	sd = scsi_address_device(sa);
	VERIFY(sd != NULL);

	tgt = scsi_device_hba_private_get(sd);
	VERIFY(tgt != NULL);

	cmd = pkt->pkt_ha_private;
	VERIFY(cmd != NULL);

	VERIFY(cmd->sc_tgt == tgt);

	lmrc = tgt->tgt_lmrc;
	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (TRAN_FATAL_ERROR);

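	/*
	 * Count this command against the firmware limit for outstanding
	 * commands, returning TRAN_BUSY if the limit is already reached.
	 */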
	if (atomic_inc_uint_nv(&lmrc->l_fw_outstanding_cmds) >
	    lmrc->l_max_scsi_cmds) {
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (TRAN_BUSY);
	}

	rw_enter(&tgt->tgt_lock, RW_READER);

	mpt = cmd->sc_mpt;
	VERIFY(mpt != NULL);
	mutex_enter(&mpt->mpt_lock);

	io_req = mpt->mpt_io_frame;

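	/*
	 * Use the vendor-specific LD I/O request function by default; the
	 * JBOD fastpath case below switches to a plain MPI2 SCSI I/O request.
	 */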
	io_req->Function = LMRC_MPI2_FUNCTION_LD_IO_REQUEST;

	rc = &io_req->VendorRegion;
	rc->rc_ld_tgtid = tgt->tgt_dev_id;

	if (tgt->tgt_pd_info == NULL) {
		/* This is LD I/O */
		io_req->DevHandle = tgt->tgt_dev_id;

		if (lmrc_cmd_is_rw(pkt->pkt_cdbp[0])) {
			rc->rc_type = MPI2_TYPE_CUDA;
			rc->rc_nseg = 1;
			rc->rc_routing_flags.rf_sqn = 1;
		}
	} else {
		/* This is PD I/O */
		io_req->DevHandle = LMRC_DEVHDL_INVALID;
		rc->rc_raid_flags.rf_io_subtype = LMRC_RF_IO_SUBTYPE_SYSTEM_PD;

		if (tgt->tgt_type == DTYPE_DIRECT &&
		    lmrc->l_use_seqnum_jbod_fp) {
			lmrc_pd_cfg_t *pdcfg;

			rw_enter(&lmrc->l_pdmap_lock, RW_READER);
			pdcfg = &lmrc->l_pdmap->pm_pdcfg[tgt->tgt_dev_id];

			if (lmrc->l_pdmap_tgtid_support)
				rc->rc_ld_tgtid = pdcfg->pd_tgtid;

			rc->rc_cfg_seqnum = pdcfg->pd_seqnum;
			io_req->DevHandle = pdcfg->pd_devhdl;
			rw_exit(&lmrc->l_pdmap_lock);

			if (lmrc_cmd_is_rw(pkt->pkt_cdbp[0])) {
				/*
				 * MPI2_TYPE_CUDA is valid only if FW supports
				 * JBOD Sequence number
				 */
				rc->rc_type = MPI2_TYPE_CUDA;
				rc->rc_nseg = 1;
				rc->rc_routing_flags.rf_sqn = 1;

				io_req->Function =
				    MPI2_FUNCTION_SCSI_IO_REQUEST;
				io_req->IoFlags |=
				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
				req_flags =
				    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
			}
		}
	}

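	/*
	 * If the command carries data, set the transfer direction, build the
	 * SGL, and record the transfer length and number of SGEs in the RAID
	 * context.
	 */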
	if (pkt->pkt_numcookies > 0) {
		if ((pkt->pkt_dma_flags & DDI_DMA_READ) != 0)
			io_req->Control |= MPI2_SCSIIO_CONTROL_READ;

		if ((pkt->pkt_dma_flags & DDI_DMA_WRITE) != 0)
			io_req->Control |= MPI2_SCSIIO_CONTROL_WRITE;

		lmrc_dma_build_sgl(lmrc, mpt, pkt->pkt_cookies,
		    pkt->pkt_numcookies);

		io_req->DataLength = pkt->pkt_dma_len;

		rc->rc_num_sge = pkt->pkt_numcookies;
	}

	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
	    (void *)io_req - lmrc->l_ioreq_dma.ld_buf,
	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
	    ==, DDI_SUCCESS);

	req_desc = lmrc_build_atomic_request(lmrc, mpt, req_flags);

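	/*
	 * pkt_time is in seconds, while mpt_timeout is an absolute deadline
	 * in nanoseconds as returned by gethrtime().
	 */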
	mpt->mpt_timeout = gethrtime() + pkt->pkt_time * NANOSEC;
	lmrc_send_atomic_request(lmrc, req_desc);

	if (intr) {
		/* normal interrupt driven I/O processing */
		lmrc_tgt_add_active_mpt(tgt, mpt);
		ret = TRAN_ACCEPT;
	} else {
		/* FLAG_NOINTR was set and we're panicked */
		VERIFY(ddi_in_panic());

		ret = lmrc_poll_for_reply(lmrc, mpt);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
	}

	mutex_exit(&mpt->mpt_lock);
	rw_exit(&tgt->tgt_lock);

	return (ret);
}

/*
 * lmrc_task_mgmt
 *
 * Send a TASK MGMT command to a target, provided it is TM capable.
 */
static int
lmrc_task_mgmt(lmrc_t *lmrc, lmrc_tgt_t *tgt, uint8_t type, uint16_t smid)
{
	Mpi2SCSITaskManagementRequest_t *tm_req;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	uint64_t *pd_ld_flags;
	lmrc_atomic_req_desc_t req_desc;
	lmrc_mpt_cmd_t *mpt;
	clock_t ret;
	boolean_t tm_capable;

	rw_enter(&tgt->tgt_lock, RW_READER);

	/* Make sure the target can handle task mgmt commands. */
	if (tgt->tgt_pd_info == NULL) {
		tm_capable = lmrc_ld_tm_capable(lmrc, tgt->tgt_dev_id);
	} else {
		tm_capable = lmrc_pd_tm_capable(lmrc, tgt->tgt_dev_id);
	}

	if (!tm_capable) {
		rw_exit(&tgt->tgt_lock);
		return (0);
	}

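	/* Account for this command against the firmware command limit. */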
	if (atomic_inc_uint_nv(&lmrc->l_fw_outstanding_cmds) >
	    lmrc->l_max_scsi_cmds) {
		rw_exit(&tgt->tgt_lock);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (0);
	}

	mpt = lmrc_get_mpt(lmrc);
	if (mpt == NULL) {
		rw_exit(&tgt->tgt_lock);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (0);
	}
	ASSERT(mutex_owned(&mpt->mpt_lock));

	bzero(mpt->mpt_io_frame, LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	tm_req = mpt->mpt_io_frame;
	tm_reply = mpt->mpt_io_frame + 128;
	pd_ld_flags = (uint64_t *)tm_reply;

	tm_req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_req->TaskType = type;
	tm_req->TaskMID = smid;
	tm_req->DevHandle = tgt->tgt_dev_id;

	/*
	 * The uint32_t immediately following the MPI2 task management request
	 * contains two flags indicating whether the target is an LD or a PD.
	 */
	if (tgt->tgt_pd_info == NULL)
		*pd_ld_flags = 1<<0;
	else
		*pd_ld_flags = 1<<1;

	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
	    (void *)tm_req - lmrc->l_ioreq_dma.ld_buf,
	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
	    ==, DDI_SUCCESS);

	req_desc = lmrc_build_atomic_request(lmrc, mpt,
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY);

	lmrc_send_atomic_request(lmrc, req_desc);

	/* Poll for completion if we're called while the system is panicked. */
	if (ddi_in_panic()) {
		ret = lmrc_poll_for_reply(lmrc, mpt);
	} else {
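		/*
		 * Wait for the command to be marked complete, up to
		 * LMRC_RESET_WAIT_TIME seconds.
		 */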
		clock_t timeout = drv_usectohz(LMRC_RESET_WAIT_TIME * MICROSEC);

		timeout += ddi_get_lbolt();
		do {
			ret = cv_timedwait(&mpt->mpt_cv, &mpt->mpt_lock,
			    timeout);
		} while (mpt->mpt_complete == B_FALSE && ret != -1);
	}

	atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
	lmrc_put_mpt(mpt);
	rw_exit(&tgt->tgt_lock);

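	/*
	 * cv_timedwait() returns -1 if the wait timed out without the command
	 * completing; report that to the caller as -1, anything else as 1.
	 */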
	if (ret >= 0)
		return (1);
	else
		return (-1);
}

/*
 * lmrc_abort_mpt
 *
 * Abort an MPT command by sending a TASK MGMT ABORT TASK command.
 */
int
lmrc_abort_mpt(lmrc_t *lmrc, lmrc_tgt_t *tgt, lmrc_mpt_cmd_t *mpt)
{
	ASSERT(mutex_owned(&tgt->tgt_mpt_active_lock));
	ASSERT(mutex_owned(&mpt->mpt_lock));

	return (lmrc_task_mgmt(lmrc, tgt, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    mpt->mpt_smid));
}

/*
 * lmrc_tran_abort
 *
 * Send a SCSI TASK MGMT request to abort a packet.
 */
static int
lmrc_tran_abort(struct scsi_address *sa, struct scsi_pkt *pkt)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	int ret = 0;

	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (0);

	/*
	 * If no pkt was given, abort all outstanding pkts for this target.
	 */
	if (pkt == NULL) {
		mutex_enter(&tgt->tgt_mpt_active_lock);
		for (mpt = lmrc_tgt_first_active_mpt(tgt);
		    mpt != NULL;
		    mpt = lmrc_tgt_next_active_mpt(tgt, mpt)) {
			ASSERT(mutex_owned(&mpt->mpt_lock));
			if (mpt->mpt_complete)
				continue;
			if (mpt->mpt_pkt == NULL)
				continue;

			if (lmrc_abort_mpt(lmrc, tgt, mpt) > 0)
				ret = 1;
		}
		mutex_exit(&tgt->tgt_mpt_active_lock);

		return (ret);
	}

	cmd = pkt->pkt_ha_private;

	VERIFY(cmd != NULL);
	VERIFY(cmd->sc_tgt == tgt);

	mpt = cmd->sc_mpt;
	VERIFY(mpt != NULL);

	mutex_enter(&mpt->mpt_lock);
	ret = lmrc_abort_mpt(lmrc, tgt, mpt);
	mutex_exit(&mpt->mpt_lock);

	if (ret == -1) {
		dev_err(lmrc->l_dip, CE_WARN, "!abort task timed out, "
		    "tgt %d", tgt->tgt_dev_id);
		return (0);
	}

	return (ret);
}

/*
 * lmrc_tran_reset
 *
 * Reset a target. There is no support for LUN or bus resets; RESET_LUN and
 * RESET_ALL are treated like RESET_TARGET.
 */
static int
lmrc_tran_reset(struct scsi_address *sa, int level)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int ret = 0;

	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (0);

	switch (level) {
	case RESET_ALL:
	case RESET_LUN:
	case RESET_TARGET:
		rw_enter(&tgt->tgt_lock, RW_READER);
		ret = lmrc_task_mgmt(lmrc, tgt,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0);
		rw_exit(&tgt->tgt_lock);

		if (ret == -1) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!target reset timed out, tgt %d",
			    tgt->tgt_dev_id);
			return (0);
		}

		break;
	}

	/*
	 * Fake a successful return in the case of RESET_ALL for the benefit
	 * of being able to save kernel core dumps: sddump() wants to reset
	 * the device and errors out if that fails, even when the reset failed
	 * only because it isn't supported.
	 */
	if (ret == 0 && level == RESET_ALL)
		ret = 1;

	return (ret);
}

/*
 * lmrc_tran_setup_pkt
 *
 * Set up an MPT command for a scsi_pkt, and initialize scsi_pkt members as
 * needed:
 * - pkt_cdbp will point to the CDB structure embedded in the MPT I/O frame
 * - pkt_scbp will point to the struct scsi_arq_status in the sense DMA memory
 *   allocated for the MPT command
 * - pkt_scblen will be set to the size of the sense DMA memory, minus alignment
 * - SenseBufferLowAddress and SenseBufferLength in the MPT I/O frame will be
 *   set to the sense DMA address and length, respectively, adjusted to account
 *   for the space needed for the ARQ pkt and alignment.
 * - There is no SenseBufferHighAddress.
 * - rc_timeout is set to pkt_time, but it is unknown if that has any effect
 *
 * The procedure is the same irrespective of whether the command is sent to a
 * physical device or RAID volume.
 */
static int
lmrc_tran_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t arg)
{
	struct scsi_address *sa;
	struct scsi_device *sd;
	lmrc_tgt_t *tgt;
	lmrc_t *lmrc;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	Mpi25SCSIIORequest_t *io_req;
	lmrc_raidctx_g35_t *rc;

	if (pkt->pkt_cdblen > sizeof (io_req->CDB.CDB32))
		return (-1);

	sa = &pkt->pkt_address;
	VERIFY(sa != NULL);

	sd = scsi_address_device(sa);
	VERIFY(sd != NULL);

	tgt = scsi_device_hba_private_get(sd);
	VERIFY(tgt != NULL);

	rw_enter(&tgt->tgt_lock, RW_READER);

	lmrc = tgt->tgt_lmrc;
	VERIFY(lmrc != NULL);

	cmd = pkt->pkt_ha_private;
	ASSERT(cmd != NULL);

	mpt = lmrc_get_mpt(lmrc);
	if (mpt == NULL) {
		rw_exit(&tgt->tgt_lock);
		return (-1);
	}
	ASSERT(mutex_owned(&mpt->mpt_lock));

	io_req = mpt->mpt_io_frame;

	pkt->pkt_cdbp = io_req->CDB.CDB32;

	/* Just the CDB length now, but other flags may be set later. */
	io_req->IoFlags = pkt->pkt_cdblen;

	/*
	 * Set up the sense buffer. The DMA memory was set up to hold the
	 * whole ARQ structure, aligned so that its sts_sensedata is aligned
	 * to 64 bytes. Point SenseBufferLowAddress to sts_sensedata and
	 * reduce the length accordingly.
	 */
	pkt->pkt_scbp = mpt->mpt_sense;
	pkt->pkt_scblen = lmrc_dma_get_size(&mpt->mpt_sense_dma) - 64 +
	    offsetof(struct scsi_arq_status, sts_sensedata);

	lmrc_dma_set_addr32(&mpt->mpt_sense_dma,
	    &io_req->SenseBufferLowAddress);
	io_req->SenseBufferLowAddress +=
	    P2ROUNDUP(offsetof(struct scsi_arq_status, sts_sensedata), 64);
	io_req->SenseBufferLength = pkt->pkt_scblen -
	    offsetof(struct scsi_arq_status, sts_sensedata);

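	/*
	 * As noted in the comment at the top of this file, it is unknown
	 * whether setting rc_timeout here has any effect.
	 */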
	rc = &io_req->VendorRegion;
	rc->rc_timeout = pkt->pkt_time;

	cmd->sc_mpt = mpt;
	cmd->sc_tgt = tgt;
	mpt->mpt_pkt = pkt;
	mutex_exit(&mpt->mpt_lock);
	rw_exit(&tgt->tgt_lock);

	return (0);
}

/*
 * lmrc_tran_teardown_pkt
 *
 * Return the MPT command to the free list. It'll be cleared later before
 * it is reused.
 */
static void
lmrc_tran_teardown_pkt(struct scsi_pkt *pkt)
{
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;

	cmd = pkt->pkt_ha_private;
	ASSERT(cmd != NULL);

	mpt = cmd->sc_mpt;
	ASSERT(mpt != NULL);

	mutex_enter(&mpt->mpt_lock);
	lmrc_put_mpt(mpt);
}

/*
 * lmrc_hba_attach
 *
 * Set up the HBA functions of lmrc. This is a SAS controller and uses complex
 * addressing for targets, presenting physical devices (PDs) and RAID volumes
 * (LDs) on separate iports.
 */
int
lmrc_hba_attach(lmrc_t *lmrc)
{
	scsi_hba_tran_t	*tran;
	ddi_dma_attr_t tran_attr = lmrc->l_dma_attr_32;

	tran = scsi_hba_tran_alloc(lmrc->l_dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		dev_err(lmrc->l_dip, CE_WARN, "!scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = lmrc;

	tran->tran_tgt_init = lmrc_tran_tgt_init;
	tran->tran_tgt_free = lmrc_tran_tgt_free;

	tran->tran_tgt_probe = scsi_hba_probe;

	tran->tran_start = lmrc_tran_start;
	tran->tran_abort = lmrc_tran_abort;
	tran->tran_reset = lmrc_tran_reset;

	tran->tran_getcap = lmrc_getcap;
	tran->tran_setcap = lmrc_setcap;

	tran->tran_setup_pkt = lmrc_tran_setup_pkt;
	tran->tran_teardown_pkt = lmrc_tran_teardown_pkt;
	tran->tran_hba_len = sizeof (lmrc_scsa_cmd_t);
	tran->tran_interconnect_type = INTERCONNECT_SAS;

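	/*
	 * Adjust the DMA attributes used for transfers: allow relaxed
	 * ordering if enabled and limit the scatter/gather list length to
	 * what the controller supports.
	 */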
	if (lmrc_relaxed_ordering)
		tran_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
	tran_attr.dma_attr_sgllen = lmrc->l_max_num_sge;

	if (scsi_hba_attach_setup(lmrc->l_dip, &tran_attr, tran,
	    SCSI_HBA_HBA | SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS)
		goto fail;

	lmrc->l_hba_tran = tran;

	if (scsi_hba_iport_register(lmrc->l_dip, LMRC_IPORT_RAID) !=
	    DDI_SUCCESS)
		goto fail;

	if (scsi_hba_iport_register(lmrc->l_dip, LMRC_IPORT_PHYS) !=
	    DDI_SUCCESS)
		goto fail;

	return (DDI_SUCCESS);

fail:
	dev_err(lmrc->l_dip, CE_WARN,
	    "!could not attach to SCSA framework");
	lmrc_hba_detach(lmrc);

	return (DDI_FAILURE);
}

void
lmrc_hba_detach(lmrc_t *lmrc)
{
	if (lmrc->l_hba_tran == NULL)
		return;

	(void) scsi_hba_detach(lmrc->l_dip);
	scsi_hba_tran_free(lmrc->l_hba_tran);
	lmrc->l_hba_tran = NULL;
}