1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30 *
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36/* Communications core for Avago Technologies (LSI) MPT3 */
37
38/* TODO Move headers to mprvar */
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/selinfo.h>
44#include <sys/module.h>
45#include <sys/bus.h>
46#include <sys/conf.h>
47#include <sys/bio.h>
48#include <sys/malloc.h>
49#include <sys/uio.h>
50#include <sys/sysctl.h>
51#include <sys/endian.h>
52#include <sys/queue.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/sbuf.h>
56
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_debug.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_xpt_periph.h>
69#include <cam/cam_periph.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72#include <cam/scsi/smp_all.h>
73
74#include <dev/nvme/nvme.h>
75
76#include <dev/mpr/mpi/mpi2_type.h>
77#include <dev/mpr/mpi/mpi2.h>
78#include <dev/mpr/mpi/mpi2_ioc.h>
79#include <dev/mpr/mpi/mpi2_sas.h>
80#include <dev/mpr/mpi/mpi2_pci.h>
81#include <dev/mpr/mpi/mpi2_cnfg.h>
82#include <dev/mpr/mpi/mpi2_init.h>
83#include <dev/mpr/mpi/mpi2_tool.h>
84#include <dev/mpr/mpr_ioctl.h>
85#include <dev/mpr/mprvar.h>
86#include <dev/mpr/mpr_table.h>
87#include <dev/mpr/mpr_sas.h>
88
89#define MPRSAS_DISCOVERY_TIMEOUT	20
90#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by SCSI opcode.  A non-zero entry holds the EEDP flags to apply
 * for that opcode: PRO_R (check/remove protection info) for reads,
 * PRO_W/PRO_V (insert protection info) for writes and verifies.  The
 * populated slots correspond to the 10/12/16-byte READ, WRITE,
 * WRITE AND VERIFY, VERIFY, and WRITE SAME opcodes.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/* 0x00 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x10 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 */ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x30 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 */ 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 */ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90 */ 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa0 */ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xb0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xc0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xd0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xe0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xf0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116
117MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
118
119static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
120static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
121static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
122static void mprsas_poll(struct cam_sim *sim);
123static void mprsas_scsiio_timeout(void *data);
124static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
125static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
126static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
127static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
128static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
129static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130    struct mpr_command *cm);
131static void mprsas_async(void *callback_arg, uint32_t code,
132    struct cam_path *path, void *arg);
133static int mprsas_send_portenable(struct mpr_softc *sc);
134static void mprsas_portenable_complete(struct mpr_softc *sc,
135    struct mpr_command *cm);
136
137static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
138static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
139    uint64_t sasaddr);
140static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
141
142struct mprsas_target *
143mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
144    uint16_t handle)
145{
146	struct mprsas_target *target;
147	int i;
148
149	for (i = start; i < sassc->maxtargets; i++) {
150		target = &sassc->targets[i];
151		if (target->handle == handle)
152			return (target);
153	}
154
155	return (NULL);
156}
157
158/* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery.  Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
164 */
165void
166mprsas_startup_increment(struct mprsas_softc *sassc)
167{
168	MPR_FUNCTRACE(sassc->sc);
169
170	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
171		if (sassc->startup_refcount++ == 0) {
172			/* just starting, freeze the simq */
173			mpr_dprint(sassc->sc, MPR_INIT,
174			    "%s freezing simq\n", __func__);
175			xpt_hold_boot();
176			xpt_freeze_simq(sassc->sim, 1);
177		}
178		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
179		    sassc->startup_refcount);
180	}
181}
182
183void
184mprsas_release_simq_reinit(struct mprsas_softc *sassc)
185{
186	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
187		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
188		xpt_release_simq(sassc->sim, 1);
189		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
190	}
191}
192
193void
194mprsas_startup_decrement(struct mprsas_softc *sassc)
195{
196	MPR_FUNCTRACE(sassc->sc);
197
198	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
199		if (--sassc->startup_refcount == 0) {
200			/* finished all discovery-related actions, release
201			 * the simq and rescan for the latest topology.
202			 */
203			mpr_dprint(sassc->sc, MPR_INIT,
204			    "%s releasing simq\n", __func__);
205			sassc->flags &= ~MPRSAS_IN_STARTUP;
206			xpt_release_simq(sassc->sim, 1);
207			xpt_release_boot();
208		}
209		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
210		    sassc->startup_refcount);
211	}
212}
213
/*
 * The firmware requires us to stop sending commands when we're doing task
 * management.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
221struct mpr_command *
222mprsas_alloc_tm(struct mpr_softc *sc)
223{
224	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
225	struct mpr_command *tm;
226
227	MPR_FUNCTRACE(sc);
228	tm = mpr_alloc_high_priority_command(sc);
229	if (tm == NULL)
230		return (NULL);
231
232	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
233	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
234	return tm;
235}
236
237void
238mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
239{
240	int target_id = 0xFFFFFFFF;
241
242	MPR_FUNCTRACE(sc);
243	if (tm == NULL)
244		return;
245
246	/*
247	 * For TM's the devq is frozen for the device.  Unfreeze it here and
248	 * free the resources used for freezing the devq.  Must clear the
249	 * INRESET flag as well or scsi I/O will not work.
250	 */
251	if (tm->cm_targ != NULL) {
252		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
253		target_id = tm->cm_targ->tid;
254	}
255	if (tm->cm_ccb) {
256		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
257		    target_id);
258		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259		xpt_free_path(tm->cm_ccb->ccb_h.path);
260		xpt_free_ccb(tm->cm_ccb);
261	}
262
263	mpr_free_high_priority_command(sc, tm);
264}
265
266void
267mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
268{
269	struct mprsas_softc *sassc = sc->sassc;
270	path_id_t pathid;
271	target_id_t targetid;
272	union ccb *ccb;
273
274	MPR_FUNCTRACE(sc);
275	pathid = cam_sim_path(sassc->sim);
276	if (targ == NULL)
277		targetid = CAM_TARGET_WILDCARD;
278	else
279		targetid = targ - sassc->targets;
280
281	/*
282	 * Allocate a CCB and schedule a rescan.
283	 */
284	ccb = xpt_alloc_ccb_nowait();
285	if (ccb == NULL) {
286		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
287		return;
288	}
289
290	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
291	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
293		xpt_free_ccb(ccb);
294		return;
295	}
296
297	if (targetid == CAM_TARGET_WILDCARD)
298		ccb->ccb_h.func_code = XPT_SCAN_BUS;
299	else
300		ccb->ccb_h.func_code = XPT_SCAN_TGT;
301
302	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
303	xpt_rescan(ccb);
304}
305
306static void
307mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
308{
309	struct sbuf sb;
310	va_list ap;
311	char str[224];
312	char path_str[64];
313
314	if (cm == NULL)
315		return;
316
317	/* No need to be in here if debugging isn't enabled */
318	if ((cm->cm_sc->mpr_debug & level) == 0)
319		return;
320
321	sbuf_new(&sb, str, sizeof(str), 0);
322
323	va_start(ap, fmt);
324
325	if (cm->cm_ccb != NULL) {
326		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
327		    sizeof(path_str));
328		sbuf_cat(&sb, path_str);
329		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
330			scsi_command_string(&cm->cm_ccb->csio, &sb);
331			sbuf_printf(&sb, "length %d ",
332			    cm->cm_ccb->csio.dxfer_len);
333		}
334	} else {
335		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
336		    cam_sim_name(cm->cm_sc->sassc->sim),
337		    cam_sim_unit(cm->cm_sc->sassc->sim),
338		    cam_sim_bus(cm->cm_sc->sassc->sim),
339		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
340		    cm->cm_lun);
341	}
342
343	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
344	sbuf_vprintf(&sb, fmt, ap);
345	sbuf_finish(&sb);
346	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
347
348	va_end(ap);
349}
350
351static void
352mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
353{
354	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
355	struct mprsas_target *targ;
356	uint16_t handle;
357
358	MPR_FUNCTRACE(sc);
359
360	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
361	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
362	targ = tm->cm_targ;
363
364	if (reply == NULL) {
365		/* XXX retry the remove after the diag reset completes? */
366		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
367		    "0x%04x\n", __func__, handle);
368		mprsas_free_tm(sc, tm);
369		return;
370	}
371
372	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373	    MPI2_IOCSTATUS_SUCCESS) {
374		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
375		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
376	}
377
378	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
379	    le32toh(reply->TerminationCount));
380	mpr_free_reply(sc, tm->cm_reply_data);
381	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382
383	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
384	    targ->tid, handle);
385
386	/*
387	 * Don't clear target if remove fails because things will get confusing.
388	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389	 * this target id if possible, and so we can assign the same target id
390	 * to this device if it comes back in the future.
391	 */
392	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
393	    MPI2_IOCSTATUS_SUCCESS) {
394		targ = tm->cm_targ;
395		targ->handle = 0x0;
396		targ->encl_handle = 0x0;
397		targ->encl_level_valid = 0x0;
398		targ->encl_level = 0x0;
399		targ->connector_name[0] = ' ';
400		targ->connector_name[1] = ' ';
401		targ->connector_name[2] = ' ';
402		targ->connector_name[3] = ' ';
403		targ->encl_slot = 0x0;
404		targ->exp_dev_handle = 0x0;
405		targ->phy_num = 0x0;
406		targ->linkrate = 0x0;
407		targ->devinfo = 0x0;
408		targ->flags = 0x0;
409		targ->scsi_req_desc_type = 0;
410	}
411
412	mprsas_free_tm(sc, tm);
413}
414
415
416/*
417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418 * Otherwise Volume Delete is same as Bare Drive Removal.
419 */
420void
421mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
422{
423	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424	struct mpr_softc *sc;
425	struct mpr_command *cm;
426	struct mprsas_target *targ = NULL;
427
428	MPR_FUNCTRACE(sassc->sc);
429	sc = sassc->sc;
430
431	targ = mprsas_find_target_by_handle(sassc, 0, handle);
432	if (targ == NULL) {
433		/* FIXME: what is the action? */
434		/* We don't know about this device? */
435		mpr_dprint(sc, MPR_ERROR,
436		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
437		return;
438	}
439
440	targ->flags |= MPRSAS_TARGET_INREMOVAL;
441
442	cm = mprsas_alloc_tm(sc);
443	if (cm == NULL) {
444		mpr_dprint(sc, MPR_ERROR,
445		    "%s: command alloc failure\n", __func__);
446		return;
447	}
448
449	mprsas_rescan_target(sc, targ);
450
451	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
452	req->DevHandle = targ->handle;
453	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
454
455	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
456		/* SAS Hard Link Reset / SATA Link Reset */
457		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
458	} else {
459		/* PCIe Protocol Level Reset*/
460		req->MsgFlags =
461		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
462	}
463
464	cm->cm_targ = targ;
465	cm->cm_data = NULL;
466	cm->cm_complete = mprsas_remove_volume;
467	cm->cm_complete_data = (void *)(uintptr_t)handle;
468
469	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
470	    __func__, targ->tid);
471	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
472
473	mpr_map_command(sc, cm);
474}
475
476/*
477 * The firmware performs debounce on the link to avoid transient link errors
478 * and false removals.  When it does decide that link has been lost and a
479 * device needs to go away, it expects that the host will perform a target reset
480 * and then an op remove.  The reset has the side-effect of aborting any
481 * outstanding requests for the device, which is required for the op-remove to
482 * succeed.  It's not clear if the host should check for the device coming back
483 * alive after the reset.
484 */
485void
486mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
487{
488	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
489	struct mpr_softc *sc;
490	struct mpr_command *tm;
491	struct mprsas_target *targ = NULL;
492
493	MPR_FUNCTRACE(sassc->sc);
494
495	sc = sassc->sc;
496
497	targ = mprsas_find_target_by_handle(sassc, 0, handle);
498	if (targ == NULL) {
499		/* FIXME: what is the action? */
500		/* We don't know about this device? */
501		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
502		    __func__, handle);
503		return;
504	}
505
506	targ->flags |= MPRSAS_TARGET_INREMOVAL;
507
508	tm = mprsas_alloc_tm(sc);
509	if (tm == NULL) {
510		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
511		    __func__);
512		return;
513	}
514
515	mprsas_rescan_target(sc, targ);
516
517	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
518	memset(req, 0, sizeof(*req));
519	req->DevHandle = htole16(targ->handle);
520	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
521
522	/* SAS Hard Link Reset / SATA Link Reset */
523	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
524
525	tm->cm_targ = targ;
526	tm->cm_data = NULL;
527	tm->cm_complete = mprsas_remove_device;
528	tm->cm_complete_data = (void *)(uintptr_t)handle;
529
530	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
531	    __func__, targ->tid);
532	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
533
534	mpr_map_command(sc, tm);
535}
536
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_remove().  Reuses the same command to send a SAS IO unit
 * control REMOVE_DEVICE request, either immediately (no commands pending
 * on the target) or deferred via targ->pending_remove_tm.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	/* A failed reset is logged but the removal still proceeds. */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command to build the REMOVE_DEVICE request. */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
613
/*
 * Completion handler for the SAS IO unit control REMOVE_DEVICE request
 * sent by mprsas_remove_device().  On success, clears the target entry
 * (leaving devname/sasaddr intact) and frees any cached LUN records.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the LUN records cached for the now-gone device. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
693
694static int
695mprsas_register_events(struct mpr_softc *sc)
696{
697	uint8_t events[16];
698
699	bzero(events, 16);
700	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
701	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
702	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
703	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
704	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
705	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
706	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
707	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
708	setbit(events, MPI2_EVENT_IR_VOLUME);
709	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
710	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
711	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
712	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
713	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
714		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
715		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
716			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
717			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
718			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
719		}
720	}
721
722	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
723	    &sc->sassc->mprsas_eh);
724
725	return (0);
726}
727
728int
729mpr_attach_sas(struct mpr_softc *sc)
730{
731	struct mprsas_softc *sassc;
732	cam_status status;
733	int unit, error = 0, reqs;
734
735	MPR_FUNCTRACE(sc);
736	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
737
738	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
739	if (!sassc) {
740		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
741		    "Cannot allocate SAS subsystem memory\n");
742		return (ENOMEM);
743	}
744
745	/*
746	 * XXX MaxTargets could change during a reinit.  Since we don't
747	 * resize the targets[] array during such an event, cache the value
748	 * of MaxTargets here so that we don't get into trouble later.  This
749	 * should move into the reinit logic.
750	 */
751	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
752	sassc->targets = malloc(sizeof(struct mprsas_target) *
753	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
754	if (!sassc->targets) {
755		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
756		    "Cannot allocate SAS target memory\n");
757		free(sassc, M_MPR);
758		return (ENOMEM);
759	}
760	sc->sassc = sassc;
761	sassc->sc = sc;
762
763	reqs = sc->num_reqs - sc->num_prireqs - 1;
764	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
765		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
766		error = ENOMEM;
767		goto out;
768	}
769
770	unit = device_get_unit(sc->mpr_dev);
771	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
772	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
773	if (sassc->sim == NULL) {
774		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
775		error = EINVAL;
776		goto out;
777	}
778
779	TAILQ_INIT(&sassc->ev_queue);
780
781	/* Initialize taskqueue for Event Handling */
782	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
783	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
784	    taskqueue_thread_enqueue, &sassc->ev_tq);
785	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
786	    device_get_nameunit(sc->mpr_dev));
787
788	mpr_lock(sc);
789
790	/*
791	 * XXX There should be a bus for every port on the adapter, but since
792	 * we're just going to fake the topology for now, we'll pretend that
793	 * everything is just a target on a single bus.
794	 */
795	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
796		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
797		    "Error %d registering SCSI bus\n", error);
798		mpr_unlock(sc);
799		goto out;
800	}
801
802	/*
803	 * Assume that discovery events will start right away.
804	 *
805	 * Hold off boot until discovery is complete.
806	 */
807	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
808	sc->sassc->startup_refcount = 0;
809	mprsas_startup_increment(sassc);
810
811	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
812
813	/*
814	 * Register for async events so we can determine the EEDP
815	 * capabilities of devices.
816	 */
817	status = xpt_create_path(&sassc->path, /*periph*/NULL,
818	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
819	    CAM_LUN_WILDCARD);
820	if (status != CAM_REQ_CMP) {
821		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
822		    "Error %#x creating sim path\n", status);
823		sassc->path = NULL;
824	} else {
825		int event;
826
827		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
828		status = xpt_register_async(event, mprsas_async, sc,
829					    sassc->path);
830
831		if (status != CAM_REQ_CMP) {
832			mpr_dprint(sc, MPR_ERROR,
833			    "Error %#x registering async handler for "
834			    "AC_ADVINFO_CHANGED events\n", status);
835			xpt_free_path(sassc->path);
836			sassc->path = NULL;
837		}
838	}
839	if (status != CAM_REQ_CMP) {
840		/*
841		 * EEDP use is the exception, not the rule.
842		 * Warn the user, but do not fail to attach.
843		 */
844		mpr_printf(sc, "EEDP capabilities disabled.\n");
845	}
846
847	mpr_unlock(sc);
848
849	mprsas_register_events(sc);
850out:
851	if (error)
852		mpr_detach_sas(sc);
853
854	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
855	return (error);
856}
857
/*
 * Tear down the SAS subsystem in roughly the reverse order of
 * mpr_attach_sas(): deregister firmware events, free the event taskqueue,
 * drop the CAM async handler, bus, and SIM, then release the target array
 * and the SAS softc.  Safe to call on a partially attached subsystem.
 * Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	/* Nothing to do if the SAS subsystem never attached. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Drop any startup references still held (e.g. a failed attach). */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq frozen at the beginning of discovery. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the LUN records cached on each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
920
921void
922mprsas_discovery_end(struct mprsas_softc *sassc)
923{
924	struct mpr_softc *sc = sassc->sc;
925
926	MPR_FUNCTRACE(sc);
927
928	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
929		callout_stop(&sassc->discovery_callout);
930
931	/*
932	 * After discovery has completed, check the mapping table for any
933	 * missing devices and update their missing counts. Only do this once
934	 * whenever the driver is initialized so that missing counts aren't
935	 * updated unnecessarily. Note that just because discovery has
936	 * completed doesn't mean that events have been processed yet. The
937	 * check_devices function is a callout timer that checks if ALL devices
938	 * are missing. If so, it will wait a little longer for events to
939	 * complete and keep resetting itself until some device in the mapping
940	 * table is not missing, meaning that event processing has started.
941	 */
942	if (sc->track_mapping_events) {
943		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
944		    "completed. Check for missing devices in the mapping "
945		    "table.\n");
946		callout_reset(&sc->device_check_callout,
947		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
948		    sc);
949	}
950}
951
/*
 * CAM action entry point for this SIM.  Dispatches the incoming CCB by
 * function code.  Path inquiries, transfer-settings queries, geometry,
 * and faked abort/bus-reset requests are completed inline and finished
 * with the common xpt_done() at the bottom.  SCSI I/O, SMP I/O, and
 * device resets are handed to dedicated helpers which complete the CCB
 * themselves, so those cases return directly.  Called with the softc
 * mutex held (asserted below).
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means the target is not mapped/present. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/*
		 * Translate the firmware link-rate code to a bitrate in
		 * kb/s; unknown codes report no valid speed.
		 */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Helper completes the CCB itself; do not fall through. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper completes the CCB itself; do not fall through. */
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Helper completes the CCB itself; do not fall through. */
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1079
1080static void
1081mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1082    target_id_t target_id, lun_id_t lun_id)
1083{
1084	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1085	struct cam_path *path;
1086
1087	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1088	    ac_code, target_id, (uintmax_t)lun_id);
1089
1090	if (xpt_create_path(&path, NULL,
1091		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1092		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1093		    "notification\n");
1094		return;
1095	}
1096
1097	xpt_async(ac_code, path, NULL);
1098	xpt_free_path(path);
1099}
1100
/*
 * Complete (or wake the sleepers of) every non-free command after a diag
 * reset.  Each in-flight command is forced to BUSY with a NULL reply so
 * its completion path can recognize the reset.  Called with the softc
 * mutex held (asserted below).
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Buffers allocated for a SATA ID command that timed out are
		 * owned by the command; release them here since the normal
		 * completion path will not run.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Polled commands are completed by marking the flag. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding once everything above has been forced. */
	sc->io_cmds_active = 0;
}
1155
/*
 * Bring the SAS layer back to startup state after a controller reinit
 * (diag reset): freeze discovery state, notify CAM of the bus reset,
 * flush all outstanding commands, and invalidate every cached target
 * handle so the targets get rediscovered with fresh handles.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* A nonzero count here means commands were leaked/stuck. */
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1198static void
1199mprsas_tm_timeout(void *data)
1200{
1201	struct mpr_command *tm = data;
1202	struct mpr_softc *sc = tm->cm_sc;
1203
1204	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1205
1206	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1207	    "out\n", tm);
1208
1209	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1210	    ("command not inqueue\n"));
1211
1212	tm->cm_state = MPR_CM_STATE_BUSY;
1213	mpr_reinit(sc);
1214}
1215
1216static void
1217mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1218{
1219	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1220	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1221	unsigned int cm_count = 0;
1222	struct mpr_command *cm;
1223	struct mprsas_target *targ;
1224
1225	callout_stop(&tm->cm_callout);
1226
1227	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1228	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1229	targ = tm->cm_targ;
1230
1231	/*
1232	 * Currently there should be no way we can hit this case.  It only
1233	 * happens when we have a failure to allocate chain frames, and
1234	 * task management commands don't have S/G lists.
1235	 */
1236	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1237		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1238		    "%s: cm_flags = %#x for LUN reset! "
1239		    "This should not happen!\n", __func__, tm->cm_flags);
1240		mprsas_free_tm(sc, tm);
1241		return;
1242	}
1243
1244	if (reply == NULL) {
1245		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1246		    tm);
1247		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1248			/* this completion was due to a reset, just cleanup */
1249			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1250			    "reset, ignoring NULL LUN reset reply\n");
1251			targ->tm = NULL;
1252			mprsas_free_tm(sc, tm);
1253		}
1254		else {
1255			/* we should have gotten a reply. */
1256			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1257			    "LUN reset attempt, resetting controller\n");
1258			mpr_reinit(sc);
1259		}
1260		return;
1261	}
1262
1263	mpr_dprint(sc, MPR_RECOVERY,
1264	    "logical unit reset status 0x%x code 0x%x count %u\n",
1265	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1266	    le32toh(reply->TerminationCount));
1267
1268	/*
1269	 * See if there are any outstanding commands for this LUN.
1270	 * This could be made more efficient by using a per-LU data
1271	 * structure of some sort.
1272	 */
1273	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1274		if (cm->cm_lun == tm->cm_lun)
1275			cm_count++;
1276	}
1277
1278	if (cm_count == 0) {
1279		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1280		    "Finished recovery after LUN reset for target %u\n",
1281		    targ->tid);
1282
1283		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1284		    tm->cm_lun);
1285
1286		/*
1287		 * We've finished recovery for this logical unit.  check and
1288		 * see if some other logical unit has a timedout command
1289		 * that needs to be processed.
1290		 */
1291		cm = TAILQ_FIRST(&targ->timedout_commands);
1292		if (cm) {
1293			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1294			   "More commands to abort for target %u\n", targ->tid);
1295			mprsas_send_abort(sc, tm, cm);
1296		} else {
1297			targ->tm = NULL;
1298			mprsas_free_tm(sc, tm);
1299		}
1300	} else {
1301		/* if we still have commands for this LUN, the reset
1302		 * effectively failed, regardless of the status reported.
1303		 * Escalate to a target reset.
1304		 */
1305		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1306		    "logical unit reset complete for target %u, but still "
1307		    "have %u command(s), sending target reset\n", targ->tid,
1308		    cm_count);
1309		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1310			mprsas_send_reset(sc, tm,
1311			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1312		else
1313			mpr_reinit(sc);
1314	}
1315}
1316
/*
 * Completion handler for a target-reset task-management command.  Stops
 * the TM timeout callout, then either finishes recovery (no commands
 * outstanding on the target) or escalates to a controller reinit (reply
 * missing outside of a diag reset, or commands still outstanding).
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1393
1394#define MPR_RESET_TIMEOUT 30
1395
/*
 * Build and issue a task-management reset (LUN reset or target reset) to
 * the device behind tm->cm_targ.
 *
 * 'type' selects MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET or
 * MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; anything else is rejected.
 * Returns 0 on success, -1 for a missing device handle or bad type, or
 * the error from mpr_map_command().  Arms a timeout callout that will
 * reinit the controller if the TM itself never completes.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	/* A zero handle means the target is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe devices may supply their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* If the TM itself times out, mprsas_tm_timeout reinits the HBA. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1470
1471
/*
 * Completion handler for an ABORT_TASK task-management command.  Stops
 * the TM timeout callout, then walks the escalation chain: finish
 * recovery if nothing is left timed out, abort the next timed-out
 * command if the current one completed, or escalate to a LUN reset if
 * the aborted command is still at the head of the timed-out queue.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1552
1553#define MPR_ABORT_TIMEOUT 5
1554
/*
 * Build and issue an ABORT_TASK task-management command for the
 * timed-out command 'cm', using the pre-allocated TM command 'tm'.
 * Returns 0 on success, -1 if the target has no device handle, or the
 * error from mpr_map_command().  The completion is handled by
 * mprsas_abort_complete(); a callout guards against the TM itself
 * timing out.
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	/* A zero handle means the target is gone; nothing to abort. */
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies which outstanding request to abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* NVMe devices (without custom TM handling) use their own timeout. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1608
/*
 * Callout handler for a SCSI I/O command that has exceeded its CAM
 * timeout.  After re-running the interrupt handler to rule out a
 * just-completed command, the command is marked timed out and queued on
 * the target's timed-out list; recovery starts by sending an abort via
 * a newly allocated TM (or is deferred if the target is already in
 * recovery).  Runs with the softc mutex held (asserted below).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data holds the submission timestamp (set at dispatch). */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1697
1698/**
1699 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1700 *			     to SCSI Unmap.
1701 * Return 0 - for success,
1702 *	  1 - to immediately return back the command with success status to CAM
1703 *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1704 *			   to FW without any translation.
1705 */
1706static int
1707mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1708    union ccb *ccb, struct mprsas_target *targ)
1709{
1710	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1711	struct ccb_scsiio *csio;
1712	struct unmap_parm_list *plist;
1713	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1714	struct nvme_command *c;
1715	int i, res;
1716	uint16_t ndesc, list_len, data_length;
1717	struct mpr_prp_page *prp_page_info;
1718	uint64_t nvme_dsm_ranges_dma_handle;
1719
1720	csio = &ccb->csio;
1721	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1722	if (!list_len) {
1723		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1724		return -EINVAL;
1725	}
1726
1727	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1728	if (!plist) {
1729		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1730		    "save UNMAP data\n");
1731		return -ENOMEM;
1732	}
1733
1734	/* Copy SCSI unmap data to a local buffer */
1735	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1736
1737	/* return back the unmap command to CAM with success status,
1738	 * if number of descripts is zero.
1739	 */
1740	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1741	if (!ndesc) {
1742		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1743		    "UNMAP cmd is Zero\n");
1744		res = 1;
1745		goto out;
1746	}
1747
1748	data_length = ndesc * sizeof(struct nvme_dsm_range);
1749	if (data_length > targ->MDTS) {
1750		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1751		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1752		res = -EINVAL;
1753		goto out;
1754	}
1755
1756	prp_page_info = mpr_alloc_prp_page(sc);
1757	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1758	    "UNMAP command.\n", __func__));
1759
1760	/*
1761	 * Insert the allocated PRP page into the command's PRP page list. This
1762	 * will be freed when the command is freed.
1763	 */
1764	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1765
1766	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1767	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1768
1769	bzero(nvme_dsm_ranges, data_length);
1770
1771	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1772	 * for each descriptors contained in SCSI UNMAP data.
1773	 */
1774	for (i = 0; i < ndesc; i++) {
1775		nvme_dsm_ranges[i].length =
1776		    htole32(be32toh(plist->desc[i].nlb));
1777		nvme_dsm_ranges[i].starting_lba =
1778		    htole64(be64toh(plist->desc[i].slba));
1779		nvme_dsm_ranges[i].attributes = 0;
1780	}
1781
1782	/* Build MPI2.6's NVMe Encapsulated Request Message */
1783	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1784	bzero(req, sizeof(*req));
1785	req->DevHandle = htole16(targ->handle);
1786	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1787	req->Flags = MPI26_NVME_FLAGS_WRITE;
1788	req->ErrorResponseBaseAddress.High =
1789	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1790	req->ErrorResponseBaseAddress.Low =
1791	    htole32(cm->cm_sense_busaddr);
1792	req->ErrorResponseAllocationLength =
1793	    htole16(sizeof(struct nvme_completion));
1794	req->EncapsulatedCommandLength =
1795	    htole16(sizeof(struct nvme_command));
1796	req->DataLength = htole32(data_length);
1797
1798	/* Build NVMe DSM command */
1799	c = (struct nvme_command *) req->NVMe_Command;
1800	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1801	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1802	c->cdw10 = htole32(ndesc - 1);
1803	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1804
1805	cm->cm_length = data_length;
1806	cm->cm_data = NULL;
1807
1808	cm->cm_complete = mprsas_scsiio_complete;
1809	cm->cm_complete_data = ccb;
1810	cm->cm_targ = targ;
1811	cm->cm_lun = csio->ccb_h.target_lun;
1812	cm->cm_ccb = ccb;
1813
1814	cm->cm_desc.Default.RequestFlags =
1815	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1816
1817	csio->ccb_h.qos.sim_data = sbinuptime();
1818	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1819	    mprsas_scsiio_timeout, cm, 0);
1820
1821	targ->issued++;
1822	targ->outstanding++;
1823	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1824	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1825
1826	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1827	    __func__, cm, ccb, targ->outstanding);
1828
1829	mpr_build_nvme_prp(sc, cm, req,
1830	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1831	mpr_map_command(sc, cm);
1832
1833out:
1834	free(plist, M_MPR);
1835	return 0;
1836}
1837
1838static void
1839mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1840{
1841	MPI2_SCSI_IO_REQUEST *req;
1842	struct ccb_scsiio *csio;
1843	struct mpr_softc *sc;
1844	struct mprsas_target *targ;
1845	struct mprsas_lun *lun;
1846	struct mpr_command *cm;
1847	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1848	uint16_t eedp_flags;
1849	uint32_t mpi_control;
1850	int rc;
1851
1852	sc = sassc->sc;
1853	MPR_FUNCTRACE(sc);
1854	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1855
1856	csio = &ccb->csio;
1857	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1858	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1859	     csio->ccb_h.target_id));
1860	targ = &sassc->targets[csio->ccb_h.target_id];
1861	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1862	if (targ->handle == 0x0) {
1863		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1864		    __func__, csio->ccb_h.target_id);
1865		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1866		xpt_done(ccb);
1867		return;
1868	}
1869	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1870		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1871		    "supported %u\n", __func__, csio->ccb_h.target_id);
1872		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1873		xpt_done(ccb);
1874		return;
1875	}
1876	/*
1877	 * Sometimes, it is possible to get a command that is not "In
1878	 * Progress" and was actually aborted by the upper layer.  Check for
1879	 * this here and complete the command without error.
1880	 */
1881	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1882		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1883		    "target %u\n", __func__, csio->ccb_h.target_id);
1884		xpt_done(ccb);
1885		return;
1886	}
1887	/*
1888	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1889	 * that the volume has timed out.  We want volumes to be enumerated
1890	 * until they are deleted/removed, not just failed. In either event,
1891	 * we're removing the target due to a firmware event telling us
1892	 * the device is now gone (as opposed to some transient event). Since
1893	 * we're opting to remove failed devices from the OS's view, we need
1894	 * to propagate that status up the stack.
1895	 */
1896	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1897		if (targ->devinfo == 0)
1898			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1899		else
1900			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1901		xpt_done(ccb);
1902		return;
1903	}
1904
1905	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1906		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1907		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1908		xpt_done(ccb);
1909		return;
1910	}
1911
1912	/*
1913	 * If target has a reset in progress, freeze the devq and return.  The
1914	 * devq will be released when the TM reset is finished.
1915	 */
1916	if (targ->flags & MPRSAS_TARGET_INRESET) {
1917		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1918		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1919		    __func__, targ->tid);
1920		xpt_freeze_devq(ccb->ccb_h.path, 1);
1921		xpt_done(ccb);
1922		return;
1923	}
1924
1925	cm = mpr_alloc_command(sc);
1926	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1927		if (cm != NULL) {
1928			mpr_free_command(sc, cm);
1929		}
1930		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1931			xpt_freeze_simq(sassc->sim, 1);
1932			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1933		}
1934		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1935		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1936		xpt_done(ccb);
1937		return;
1938	}
1939
1940	/* For NVME device's issue UNMAP command directly to NVME drives by
1941	 * constructing equivalent native NVMe DataSetManagement command.
1942	 */
1943	scsi_opcode = scsiio_cdb_ptr(csio)[0];
1944	if (scsi_opcode == UNMAP &&
1945	    targ->is_nvme &&
1946	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
1947		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
1948		if (rc == 1) { /* return command to CAM with success status */
1949			mpr_free_command(sc, cm);
1950			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1951			xpt_done(ccb);
1952			return;
1953		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
1954			return;
1955	}
1956
1957	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1958	bzero(req, sizeof(*req));
1959	req->DevHandle = htole16(targ->handle);
1960	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1961	req->MsgFlags = 0;
1962	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1963	req->SenseBufferLength = MPR_SENSE_LEN;
1964	req->SGLFlags = 0;
1965	req->ChainOffset = 0;
1966	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1967	req->SGLOffset1= 0;
1968	req->SGLOffset2= 0;
1969	req->SGLOffset3= 0;
1970	req->SkipCount = 0;
1971	req->DataLength = htole32(csio->dxfer_len);
1972	req->BidirectionalDataLength = 0;
1973	req->IoFlags = htole16(csio->cdb_len);
1974	req->EEDPFlags = 0;
1975
1976	/* Note: BiDirectional transfers are not supported */
1977	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1978	case CAM_DIR_IN:
1979		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1980		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1981		break;
1982	case CAM_DIR_OUT:
1983		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1984		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1985		break;
1986	case CAM_DIR_NONE:
1987	default:
1988		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1989		break;
1990	}
1991
1992	if (csio->cdb_len == 32)
1993		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1994	/*
1995	 * It looks like the hardware doesn't require an explicit tag
1996	 * number for each transaction.  SAM Task Management not supported
1997	 * at the moment.
1998	 */
1999	switch (csio->tag_action) {
2000	case MSG_HEAD_OF_Q_TAG:
2001		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2002		break;
2003	case MSG_ORDERED_Q_TAG:
2004		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2005		break;
2006	case MSG_ACA_TASK:
2007		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2008		break;
2009	case CAM_TAG_ACTION_NONE:
2010	case MSG_SIMPLE_Q_TAG:
2011	default:
2012		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2013		break;
2014	}
2015	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2016	req->Control = htole32(mpi_control);
2017
2018	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2019		mpr_free_command(sc, cm);
2020		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2021		xpt_done(ccb);
2022		return;
2023	}
2024
2025	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2026		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2027	else {
2028		KASSERT(csio->cdb_len <= IOCDBLEN,
2029		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2030		    "is not set", csio->cdb_len));
2031		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2032	}
2033	req->IoFlags = htole16(csio->cdb_len);
2034
2035	/*
2036	 * Check if EEDP is supported and enabled.  If it is then check if the
2037	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
2038	 * is formatted for EEDP support.  If all of this is true, set CDB up
2039	 * for EEDP transfer.
2040	 */
2041	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2042	if (sc->eedp_enabled && eedp_flags) {
2043		SLIST_FOREACH(lun, &targ->luns, lun_link) {
2044			if (lun->lun_id == csio->ccb_h.target_lun) {
2045				break;
2046			}
2047		}
2048
2049		if ((lun != NULL) && (lun->eedp_formatted)) {
2050			req->EEDPBlockSize = htole16(lun->eedp_block_size);
2051			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2052			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2053			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2054			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2055				eedp_flags |=
2056				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2057			}
2058			req->EEDPFlags = htole16(eedp_flags);
2059
2060			/*
2061			 * If CDB less than 32, fill in Primary Ref Tag with
2062			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
2063			 * already there.  Also, set protection bit.  FreeBSD
2064			 * currently does not support CDBs bigger than 16, but
2065			 * the code doesn't hurt, and will be here for the
2066			 * future.
2067			 */
2068			if (csio->cdb_len != 32) {
2069				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2070				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2071				    PrimaryReferenceTag;
2072				for (i = 0; i < 4; i++) {
2073					*ref_tag_addr =
2074					    req->CDB.CDB32[lba_byte + i];
2075					ref_tag_addr++;
2076				}
2077				req->CDB.EEDP32.PrimaryReferenceTag =
2078				    htole32(req->
2079				    CDB.EEDP32.PrimaryReferenceTag);
2080				req->CDB.EEDP32.PrimaryApplicationTagMask =
2081				    0xFFFF;
2082				req->CDB.CDB32[1] =
2083				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
2084			} else {
2085				eedp_flags |=
2086				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2087				req->EEDPFlags = htole16(eedp_flags);
2088				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2089				    0x1F) | 0x20;
2090			}
2091		}
2092	}
2093
2094	cm->cm_length = csio->dxfer_len;
2095	if (cm->cm_length != 0) {
2096		cm->cm_data = ccb;
2097		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2098	} else {
2099		cm->cm_data = NULL;
2100	}
2101	cm->cm_sge = &req->SGL;
2102	cm->cm_sglsize = (32 - 24) * 4;
2103	cm->cm_complete = mprsas_scsiio_complete;
2104	cm->cm_complete_data = ccb;
2105	cm->cm_targ = targ;
2106	cm->cm_lun = csio->ccb_h.target_lun;
2107	cm->cm_ccb = ccb;
2108	/*
2109	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2110	 * and set descriptor type.
2111	 */
2112	if (targ->scsi_req_desc_type ==
2113	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2114		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2115		cm->cm_desc.FastPathSCSIIO.RequestFlags =
2116		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2117		if (!sc->atomic_desc_capable) {
2118			cm->cm_desc.FastPathSCSIIO.DevHandle =
2119			    htole16(targ->handle);
2120		}
2121	} else {
2122		cm->cm_desc.SCSIIO.RequestFlags =
2123		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2124		if (!sc->atomic_desc_capable)
2125			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
2126	}
2127
2128	csio->ccb_h.qos.sim_data = sbinuptime();
2129	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2130	    mprsas_scsiio_timeout, cm, 0);
2131
2132	targ->issued++;
2133	targ->outstanding++;
2134	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2135	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2136
2137	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2138	    __func__, cm, ccb, targ->outstanding);
2139
2140	mpr_map_command(sc, cm);
2141	return;
2142}
2143
2144/**
 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
2146 */
2147static void
2148mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2149    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2150{
2151	u32 response_info;
2152	u8 *response_bytes;
2153	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2154	    MPI2_IOCSTATUS_MASK;
2155	u8 scsi_state = mpi_reply->SCSIState;
2156	u8 scsi_status = mpi_reply->SCSIStatus;
2157	char *desc_ioc_state = NULL;
2158	char *desc_scsi_status = NULL;
2159	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2160
2161	if (log_info == 0x31170000)
2162		return;
2163
2164	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2165	     ioc_status);
2166	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2167	    scsi_status);
2168
2169	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2170	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2171	if (targ->encl_level_valid) {
2172		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2173		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2174		    targ->connector_name);
2175	}
2176
2177	/*
2178	 * We can add more detail about underflow data here
2179	 * TO-DO
2180	 */
2181	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2182	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2183	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2184	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2185
2186	if (sc->mpr_debug & MPR_XINFO &&
2187	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2188		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2189		scsi_sense_print(csio);
2190		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2191	}
2192
2193	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2194		response_info = le32toh(mpi_reply->ResponseInfo);
2195		response_bytes = (u8 *)&response_info;
2196		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2197		    response_bytes[0],
2198		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2199		    response_bytes[0]));
2200	}
2201}
2202
2203/** mprsas_nvme_trans_status_code
2204 *
2205 * Convert Native NVMe command error status to
2206 * equivalent SCSI error status.
2207 *
2208 * Returns appropriate scsi_status
2209 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status field into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation: CHECK CONDITION with ILLEGAL REQUEST sense.
	 * Any SCT/SC combination the switch below recognizes overrides
	 * these; unrecognized combinations fall through with this default.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All four NVMe abort variants map to the same sense data. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * Always build fixed-format sense data (even for GOOD status) and
	 * compute the residual against the ccb's sense buffer length.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2379
2380/** mprsas_complete_nvme_unmap
2381 *
2382 * Complete native NVMe command issued using NVMe Encapsulated
2383 * Request Message.
2384 */
2385static u8
2386mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2387{
2388	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2389	struct nvme_completion *nvme_completion = NULL;
2390	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2391
2392	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2393	if (le16toh(mpi_reply->ErrorResponseCount)){
2394		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2395		scsi_status = mprsas_nvme_trans_status_code(
2396		    nvme_completion->status, cm);
2397	}
2398	return scsi_status;
2399}
2400
/*
 * Completion handler for SCSI I/O commands (including NVMe UNMAPs issued
 * as encapsulated requests).  Called with the softc mutex held.  Syncs and
 * unloads DMA, retires the command from the target's queues, translates
 * the MPI reply (if any) into a CAM ccb status, copies back autosense
 * data, manages SIM-queue freeze/unfreeze, and triggers a deferred device
 * removal when the last command for a departing target completes.
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on, *scsi_cdb;
	int dir = 0, i;
	u16 alloc_len;
	struct mprsas_target *target;
	target_id_t target_id;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Cancel the per-command I/O timeout armed when the cm was issued. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Retire the command from the target's outstanding command list. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that happen while error recovery is in progress. */
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
	 * flag, and use it in a few places in the rest of this function for
	 * convenience. Use the macro if available.
	 */
	scsi_cdb = scsiio_cdb_ptr(csio);

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			/* No reply during a diag reset means the bus reset
			 * ate the command; otherwise no reply means success. */
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
				csio->scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	/*
	 * For a native NVMe UNMAP completion, fold the NVMe error response
	 * (if any) into an equivalent SCSI status before the switch below.
	 */
	target = &sassc->targets[target_id];
	if (scsi_cdb[0] == UNMAP &&
	    target->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
		csio->scsi_status = rep->SCSIStatus;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Map the firmware's IOCStatus onto a CAM ccb status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy the firmware's sense data into the ccb,
			 * clamped to both the reply's SenseCount and the
			 * ccb's sense buffer length. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((scsi_cdb[0] == INQUIRY) &&
		    (scsi_cdb[1] & SI_EVPD) &&
		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 hold the INQUIRY allocation length;
			 * subtract the residual to get the valid data size. */
			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((scsi_cdb[0] == INQUIRY) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)__;
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpr_dprint(sc, MPR_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mpr_describe_table(mpr_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
		mpr_dprint(sc, MPR_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;

		/* NOTE(review): an NVMe UNMAP that lands in this catch-all
		 * is still completed as CAM_REQ_CMP — presumably because any
		 * NVMe error was already translated into the SCSI status
		 * above; confirm against the encapsulated-request path. */
		if (scsi_cdb[0] == UNMAP &&
		    target->is_nvme &&
		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

		break;
	}

	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	/* Freeze the device queue on any failure so CAM preserves ordering
	 * during error recovery. */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2836
/*
 * Completion handler for an SMP passthrough command issued by
 * mprsas_send_smpcmd().  Translates the MPI reply (or the lack of one)
 * into a CAM status on the user's XPT_SMP_IO CCB, then tears down the
 * DMA mappings and returns the command and CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	/* The originating CCB was stashed here by mprsas_send_smpcmd(). */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Reassemble the 64-bit SAS address from the little-endian request
	 * frame; it is only used for the debug message below.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Both the IOC and the SAS layer must report success. */
	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2899
/*
 * Build and map an MPI2 SMP passthrough request for an XPT_SMP_IO CCB
 * targeted at the given SAS address.  Validates the CCB's data-transfer
 * mode (only virtual addresses are supported, and at most one segment
 * each for request and response), then packs both buffers into a
 * two-element uio so a single mpr_map_command() call maps them.
 * Completion is handled asynchronously by mprsas_smpio_complete().
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/* Resolve the request/response buffer pointers per data mode. */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outbound request, iovec 1 the inbound response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3069
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to and hand it off to mprsas_send_smpcmd().  If the
 * target itself embeds an SMP target (i.e. it is an expander/enclosure),
 * its own address is used; otherwise we fall back to the parent device,
 * which is normally the expander the target hangs off of.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe path: look the parent up by device handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe path: parent info is cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3197
/*
 * Handle an XPT_RESET_DEV CCB by issuing a target-reset task management
 * request to the device.  For NVMe devices (unless custom TM handling is
 * enabled) a PCIe protocol-level reset is requested instead of a SAS/SATA
 * link reset.  Completion is handled by mprsas_resetdev_complete().
 * Called with the softc mutex held.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Block further I/O to the target until the TM completes. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3246
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Maps the TM reply's response code onto a CAM
 * status for the original XPT_RESET_DEV CCB and, on success, announces a
 * bus-device-reset to CAM.  Called with the softc mutex held.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): le32toh() is applied to ResponseCode here and below;
	 * confirm against the MPI2 headers whether that field is wider than
	 * a byte — for a single-byte field the swap is a no-op.
	 */
	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}
3292
3293static void
3294mprsas_poll(struct cam_sim *sim)
3295{
3296	struct mprsas_softc *sassc;
3297
3298	sassc = cam_sim_softc(sim);
3299
3300	if (sassc->sc->mpr_debug & MPR_TRACE) {
3301		/* frequent debug messages during a panic just slow
3302		 * everything down too much.
3303		 */
3304		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3305		    __func__);
3306		sassc->sc->mpr_debug &= ~MPR_TRACE;
3307	}
3308
3309	mpr_intr_locked(sassc->sc);
3310}
3311
/*
 * CAM asynchronous event callback.  The only event acted on is
 * AC_ADVINFO_CHANGED with a long read-capacity buffer: the new capacity
 * data is fetched via an XPT_DEV_ADVINFO CCB and used to record whether
 * the LUN is formatted for EEDP (protection information) and, if so, its
 * block size.  A per-LUN tracking structure is allocated on first sight.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN record for this path's LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/* Fetch the cached long read-capacity data from CAM. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record EEDP state: only protection types 1 and 3 are
		 * treated as EEDP-formatted; type 2 (and anything else)
		 * is not supported.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	case AC_FOUND_DEVICE:
	default:
		break;
	}
}
3411
3412/*
3413 * Set the INRESET flag for this target so that no I/O will be sent to
3414 * the target until the reset has completed.  If an I/O request does
3415 * happen, the devq will be frozen.  The CCB holds the path which is
3416 * used to release the devq.  The devq is released and the CCB is freed
3417 * when the TM completes.
3418 */
3419void
3420mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3421    struct mprsas_target *target, lun_id_t lun_id)
3422{
3423	union ccb *ccb;
3424	path_id_t path_id;
3425
3426	ccb = xpt_alloc_ccb_nowait();
3427	if (ccb) {
3428		path_id = cam_sim_path(sc->sassc->sim);
3429		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3430		    target->tid, lun_id) != CAM_REQ_CMP) {
3431			xpt_free_ccb(ccb);
3432		} else {
3433			tm->cm_ccb = ccb;
3434			tm->cm_targ = target;
3435			target->flags |= MPRSAS_TARGET_INRESET;
3436		}
3437	}
3438}
3439
3440int
3441mprsas_startup(struct mpr_softc *sc)
3442{
3443	/*
3444	 * Send the port enable message and set the wait_for_port_enable flag.
3445	 * This flag helps to keep the simq frozen until all discovery events
3446	 * are processed.
3447	 */
3448	sc->wait_for_port_enable = 1;
3449	mprsas_send_portenable(sc);
3450	return (0);
3451}
3452
3453static int
3454mprsas_send_portenable(struct mpr_softc *sc)
3455{
3456	MPI2_PORT_ENABLE_REQUEST *request;
3457	struct mpr_command *cm;
3458
3459	MPR_FUNCTRACE(sc);
3460
3461	if ((cm = mpr_alloc_command(sc)) == NULL)
3462		return (EBUSY);
3463	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3464	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3465	request->MsgFlags = 0;
3466	request->VP_ID = 0;
3467	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3468	cm->cm_complete = mprsas_portenable_complete;
3469	cm->cm_data = NULL;
3470	cm->cm_sge = NULL;
3471
3472	mpr_map_command(sc, cm);
3473	mpr_dprint(sc, MPR_XINFO,
3474	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3475	    cm, cm->cm_req, cm->cm_complete);
3476	return (0);
3477}
3478
3479static void
3480mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3481{
3482	MPI2_PORT_ENABLE_REPLY *reply;
3483	struct mprsas_softc *sassc;
3484
3485	MPR_FUNCTRACE(sc);
3486	sassc = sc->sassc;
3487
3488	/*
3489	 * Currently there should be no way we can hit this case.  It only
3490	 * happens when we have a failure to allocate chain frames, and
3491	 * port enable commands don't have S/G lists.
3492	 */
3493	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3494		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3495		    "This should not happen!\n", __func__, cm->cm_flags);
3496	}
3497
3498	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3499	if (reply == NULL)
3500		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3501	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3502	    MPI2_IOCSTATUS_SUCCESS)
3503		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3504
3505	mpr_free_command(sc, cm);
3506	/*
3507	 * Done waiting for port enable to complete.  Decrement the refcount.
3508	 * If refcount is 0, discovery is complete and a rescan of the bus can
3509	 * take place.
3510	 */
3511	sc->wait_for_port_enable = 0;
3512	sc->port_enable_complete = 1;
3513	wakeup(&sc->port_enable_complete);
3514	mprsas_startup_decrement(sassc);
3515}
3516
3517int
3518mprsas_check_id(struct mprsas_softc *sassc, int id)
3519{
3520	struct mpr_softc *sc = sassc->sc;
3521	char *ids;
3522	char *name;
3523
3524	ids = &sc->exclude_ids[0];
3525	while((name = strsep(&ids, ",")) != NULL) {
3526		if (name[0] == '\0')
3527			continue;
3528		if (strtol(name, NULL, 0) == (long)id)
3529			return (1);
3530	}
3531
3532	return (0);
3533}
3534
3535void
3536mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3537{
3538	struct mprsas_softc *sassc;
3539	struct mprsas_lun *lun, *lun_tmp;
3540	struct mprsas_target *targ;
3541	int i;
3542
3543	sassc = sc->sassc;
3544	/*
3545	 * The number of targets is based on IOC Facts, so free all of
3546	 * the allocated LUNs for each target and then the target buffer
3547	 * itself.
3548	 */
3549	for (i=0; i< maxtargets; i++) {
3550		targ = &sassc->targets[i];
3551		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3552			free(lun, M_MPR);
3553		}
3554	}
3555	free(sassc->targets, M_MPR);
3556
3557	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3558	    M_MPR, M_WAITOK|M_ZERO);
3559	if (!sassc->targets) {
3560		panic("%s failed to alloc targets with error %d\n",
3561		    __func__, ENOMEM);
3562	}
3563}
3564