mps_sas.c revision 30aa1312f7feed4962a056de3637d1547942f105
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31 *
32 * $FreeBSD$
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38/* Communications core for Avago Technologies (LSI) MPT2 */
39
40/* TODO Move headers to mpsvar */
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/selinfo.h>
46#include <sys/module.h>
47#include <sys/bus.h>
48#include <sys/conf.h>
49#include <sys/bio.h>
50#include <sys/malloc.h>
51#include <sys/uio.h>
52#include <sys/sysctl.h>
53#include <sys/endian.h>
54#include <sys/queue.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57#include <sys/sbuf.h>
58
59#include <machine/bus.h>
60#include <machine/resource.h>
61#include <sys/rman.h>
62
63#include <machine/stdarg.h>
64
65#include <cam/cam.h>
66#include <cam/cam_ccb.h>
67#include <cam/cam_xpt.h>
68#include <cam/cam_debug.h>
69#include <cam/cam_sim.h>
70#include <cam/cam_xpt_sim.h>
71#include <cam/cam_xpt_periph.h>
72#include <cam/cam_periph.h>
73#include <cam/scsi/scsi_all.h>
74#include <cam/scsi/scsi_message.h>
75#include <cam/scsi/smp_all.h>
76
77#include <dev/mps/mpi/mpi2_type.h>
78#include <dev/mps/mpi/mpi2.h>
79#include <dev/mps/mpi/mpi2_ioc.h>
80#include <dev/mps/mpi/mpi2_sas.h>
81#include <dev/mps/mpi/mpi2_cnfg.h>
82#include <dev/mps/mpi/mpi2_init.h>
83#include <dev/mps/mpi/mpi2_tool.h>
84#include <dev/mps/mps_ioctl.h>
85#include <dev/mps/mpsvar.h>
86#include <dev/mps/mps_table.h>
87#include <dev/mps/mps_sas.h>
88
89#define MPSSAS_DISCOVERY_TIMEOUT	20
90#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91
92/*
93 * static array to check SCSI OpCode for EEDP protection bits
94 */
95#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98static uint8_t op_code_prot[256] = {
99	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
115};
116
117MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118
119static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122static void mpssas_poll(struct cam_sim *sim);
123static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124    struct mps_command *cm);
125static void mpssas_scsiio_timeout(void *data);
126static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128    struct mps_command *cm, union ccb *ccb);
129static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
133static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134			       uint64_t sasaddr);
135static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
136static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137static void mpssas_async(void *callback_arg, uint32_t code,
138			 struct cam_path *path, void *arg);
139static int mpssas_send_portenable(struct mps_softc *sc);
140static void mpssas_portenable_complete(struct mps_softc *sc,
141    struct mps_command *cm);
142
143struct mpssas_target *
144mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
145{
146	struct mpssas_target *target;
147	int i;
148
149	for (i = start; i < sassc->maxtargets; i++) {
150		target = &sassc->targets[i];
151		if (target->handle == handle)
152			return (target);
153	}
154
155	return (NULL);
156}
157
158/* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery.  Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
164 */
165void
166mpssas_startup_increment(struct mpssas_softc *sassc)
167{
168	MPS_FUNCTRACE(sassc->sc);
169
170	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
171		if (sassc->startup_refcount++ == 0) {
172			/* just starting, freeze the simq */
173			mps_dprint(sassc->sc, MPS_INIT,
174			    "%s freezing simq\n", __func__);
175			xpt_hold_boot();
176			xpt_freeze_simq(sassc->sim, 1);
177		}
178		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
179		    sassc->startup_refcount);
180	}
181}
182
183void
184mpssas_release_simq_reinit(struct mpssas_softc *sassc)
185{
186	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
187		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
188		xpt_release_simq(sassc->sim, 1);
189		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
190	}
191}
192
/*
 * Drop one discovery reference taken by mpssas_startup_increment().  When
 * the count reaches zero, discovery is considered finished: clear the
 * IN_STARTUP flag, release the simq frozen at startup, and let boot
 * proceed.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
			xpt_release_boot();
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
213
214/*
215 * The firmware requires us to stop sending commands when we're doing task
216 * management.
217 * XXX The logic for serializing the device has been made lazy and moved to
218 * mpssas_prepare_for_tm().
219 */
220struct mps_command *
221mpssas_alloc_tm(struct mps_softc *sc)
222{
223	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
224	struct mps_command *tm;
225
226	tm = mps_alloc_high_priority_command(sc);
227	if (tm == NULL)
228		return (NULL);
229
230	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
232	return tm;
233}
234
/*
 * Release a task management command allocated by mpssas_alloc_tm(),
 * undoing the devq freeze and INRESET state set up when the TM was
 * prepared.  Safe to call with tm == NULL.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	/* Printed with %d below, so "no target" shows up as -1. */
	int target_id = 0xFFFFFFFF;

	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		/* The path and CCB were allocated solely to freeze the devq. */
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}
262
263void
264mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
265{
266	struct mpssas_softc *sassc = sc->sassc;
267	path_id_t pathid;
268	target_id_t targetid;
269	union ccb *ccb;
270
271	MPS_FUNCTRACE(sc);
272	pathid = cam_sim_path(sassc->sim);
273	if (targ == NULL)
274		targetid = CAM_TARGET_WILDCARD;
275	else
276		targetid = targ - sassc->targets;
277
278	/*
279	 * Allocate a CCB and schedule a rescan.
280	 */
281	ccb = xpt_alloc_ccb_nowait();
282	if (ccb == NULL) {
283		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
284		return;
285	}
286
287	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
288	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
289		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
290		xpt_free_ccb(ccb);
291		return;
292	}
293
294	if (targetid == CAM_TARGET_WILDCARD)
295		ccb->ccb_h.func_code = XPT_SCAN_BUS;
296	else
297		ccb->ccb_h.func_code = XPT_SCAN_TGT;
298
299	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
300	xpt_rescan(ccb);
301}
302
/*
 * printf-style logging helper that prefixes the message with command
 * context: the CAM path (plus CDB and transfer length for SCSI I/O) when a
 * CCB is attached, otherwise a synthetic sim/target/lun tuple, followed by
 * the command's SMID.  Output is suppressed unless 'level' is enabled in
 * sc->mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size, stack-backed sbuf; long messages are truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a path-like prefix from the sim/target. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
348
349
350static void
351mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
352{
353	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
354	struct mpssas_target *targ;
355	uint16_t handle;
356
357	MPS_FUNCTRACE(sc);
358
359	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
360	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
361	targ = tm->cm_targ;
362
363	if (reply == NULL) {
364		/* XXX retry the remove after the diag reset completes? */
365		mps_dprint(sc, MPS_FAULT,
366		    "%s NULL reply resetting device 0x%04x\n", __func__,
367		    handle);
368		mpssas_free_tm(sc, tm);
369		return;
370	}
371
372	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373	    MPI2_IOCSTATUS_SUCCESS) {
374		mps_dprint(sc, MPS_ERROR,
375		   "IOCStatus = 0x%x while resetting device 0x%x\n",
376		   le16toh(reply->IOCStatus), handle);
377	}
378
379	mps_dprint(sc, MPS_XINFO,
380	    "Reset aborted %u commands\n", reply->TerminationCount);
381	mps_free_reply(sc, tm->cm_reply_data);
382	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
383
384	mps_dprint(sc, MPS_XINFO,
385	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
386
387	/*
388	 * Don't clear target if remove fails because things will get confusing.
389	 * Leave the devname and sasaddr intact so that we know to avoid reusing
390	 * this target id if possible, and so we can assign the same target id
391	 * to this device if it comes back in the future.
392	 */
393	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
394	    MPI2_IOCSTATUS_SUCCESS) {
395		targ = tm->cm_targ;
396		targ->handle = 0x0;
397		targ->encl_handle = 0x0;
398		targ->encl_slot = 0x0;
399		targ->exp_dev_handle = 0x0;
400		targ->phy_num = 0x0;
401		targ->linkrate = 0x0;
402		targ->devinfo = 0x0;
403		targ->flags = 0x0;
404	}
405
406	mpssas_free_tm(sc, tm);
407}
408
409
410/*
411 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
412 * Otherwise Volume Delete is same as Bare Drive Removal.
413 */
414void
415mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
416{
417	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
418	struct mps_softc *sc;
419	struct mps_command *tm;
420	struct mpssas_target *targ = NULL;
421
422	MPS_FUNCTRACE(sassc->sc);
423	sc = sassc->sc;
424
425#ifdef WD_SUPPORT
426	/*
427	 * If this is a WD controller, determine if the disk should be exposed
428	 * to the OS or not.  If disk should be exposed, return from this
429	 * function without doing anything.
430	 */
431	if (sc->WD_available && (sc->WD_hide_expose ==
432	    MPS_WD_EXPOSE_ALWAYS)) {
433		return;
434	}
435#endif //WD_SUPPORT
436
437	targ = mpssas_find_target_by_handle(sassc, 0, handle);
438	if (targ == NULL) {
439		/* FIXME: what is the action? */
440		/* We don't know about this device? */
441		mps_dprint(sc, MPS_ERROR,
442		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
443		return;
444	}
445
446	targ->flags |= MPSSAS_TARGET_INREMOVAL;
447
448	tm = mpssas_alloc_tm(sc);
449	if (tm == NULL) {
450		mps_dprint(sc, MPS_ERROR,
451		    "%s: command alloc failure\n", __func__);
452		return;
453	}
454
455	mpssas_rescan_target(sc, targ);
456
457	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
458	req->DevHandle = targ->handle;
459	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
460
461	/* SAS Hard Link Reset / SATA Link Reset */
462	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
463
464	tm->cm_targ = targ;
465	tm->cm_data = NULL;
466	tm->cm_complete = mpssas_remove_volume;
467	tm->cm_complete_data = (void *)(uintptr_t)handle;
468
469	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
470	    __func__, targ->tid);
471	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
472
473	mps_map_command(sc, tm);
474}
475
476/*
477 * The MPT2 firmware performs debounce on the link to avoid transient link
478 * errors and false removals.  When it does decide that link has been lost
479 * and a device need to go away, it expects that the host will perform a
480 * target reset and then an op remove.  The reset has the side-effect of
481 * aborting any outstanding requests for the device, which is required for
482 * the op-remove to succeed.  It's not clear if the host should check for
483 * the device coming back alive after the reset.
484 */
485void
486mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
487{
488	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
489	struct mps_softc *sc;
490	struct mps_command *cm;
491	struct mpssas_target *targ = NULL;
492
493	MPS_FUNCTRACE(sassc->sc);
494
495	sc = sassc->sc;
496
497	targ = mpssas_find_target_by_handle(sassc, 0, handle);
498	if (targ == NULL) {
499		/* FIXME: what is the action? */
500		/* We don't know about this device? */
501		mps_dprint(sc, MPS_ERROR,
502		    "%s : invalid handle 0x%x \n", __func__, handle);
503		return;
504	}
505
506	targ->flags |= MPSSAS_TARGET_INREMOVAL;
507
508	cm = mpssas_alloc_tm(sc);
509	if (cm == NULL) {
510		mps_dprint(sc, MPS_ERROR,
511		    "%s: command alloc failure\n", __func__);
512		return;
513	}
514
515	mpssas_rescan_target(sc, targ);
516
517	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
518	memset(req, 0, sizeof(*req));
519	req->DevHandle = htole16(targ->handle);
520	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
521
522	/* SAS Hard Link Reset / SATA Link Reset */
523	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
524
525	cm->cm_targ = targ;
526	cm->cm_data = NULL;
527	cm->cm_complete = mpssas_remove_device;
528	cm->cm_complete_data = (void *)(uintptr_t)handle;
529
530	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
531	    __func__, targ->tid);
532	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
533
534	mps_map_command(sc, cm);
535}
536
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Phase two of device removal: reuse the same
 * command frame to send a SAS_IO_UNIT_CONTROL / REMOVE_DEVICE request,
 * either immediately if the target has no outstanding commands or
 * deferred (via targ->pending_remove_tm) until they drain.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick thus off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Deferred: whoever drains the last command sends this TM. */
		targ->pending_remove_tm = tm;
	}


	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
612
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  Final phase of device removal: on success,
 * clear the target-table entry (except devname/sasaddr) and free any
 * cached LUN records.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));


	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release any LUN records cached for the departed device. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
688
689static int
690mpssas_register_events(struct mps_softc *sc)
691{
692	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693
694	bzero(events, 16);
695	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703	setbit(events, MPI2_EVENT_IR_VOLUME);
704	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707
708	mps_register_events(sc, events, mpssas_evt_handler, NULL,
709	    &sc->sassc->mpssas_eh);
710
711	return (0);
712}
713
/*
 * Attach the SAS/CAM layer: allocate the per-controller SAS softc and
 * target table, create and register the CAM SIM, start the event-handling
 * taskqueue, freeze the simq until discovery completes, and register for
 * AC_ADVINFO_CHANGED async events (used for EEDP detection).  On any
 * failure, mps_detach_sas() tears down whatever was set up.  Returns 0 on
 * success or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	/* M_WAITOK allocations do not fail; the NULL check is defensive. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Cannot allocate SAS controller memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the simq to the non-high-priority request pool. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
842
/*
 * Tear down the SAS/CAM layer set up by mps_attach_sas().  Safe to call
 * on a partially-attached instance (attach error paths use it).  Order
 * matters: events are deregistered and the taskqueue drained before the
 * lock is taken, CAM state is released under the lock, and memory is
 * freed last.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Passing event code 0 removes the registration. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records still attached to targets. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
905
/*
 * Called when the firmware reports the end of SAS discovery: cancel any
 * pending discovery timeout and, on first-time initialization, kick off
 * the delayed missing-device check against the mapping table.
 */
void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;

	MPS_FUNCTRACE(sc);

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
		    sc);
	}
}
936
/*
 * CAM action entry point for the SIM.  Dispatches CCBs by function code.
 * Cases that complete synchronously fall through to xpt_done() at the
 * bottom; XPT_RESET_DEV, XPT_SCSI_IO and XPT_SMP_IO hand the CCB to
 * helpers that complete it asynchronously, and so return directly.
 * Called with the controller mutex held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and identity to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means the slot holds no attached device. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link-rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* Completed asynchronously by the reset path. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		/* Completed asynchronously when the I/O finishes. */
		return;
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		/* Completed asynchronously when the SMP exchange finishes. */
		return;
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1056
1057static void
1058mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1059    target_id_t target_id, lun_id_t lun_id)
1060{
1061	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1062	struct cam_path *path;
1063
1064	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1065	    ac_code, target_id, (uintmax_t)lun_id);
1066
1067	if (xpt_create_path(&path, NULL,
1068		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1069		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1070			   "notification\n");
1071		return;
1072	}
1073
1074	xpt_async(ac_code, path, NULL);
1075	xpt_free_path(path);
1076}
1077
/*
 * Force completion of every command the driver currently owns, with a
 * NULL reply, after a diag reset has invalidated all in-flight requests.
 * Each non-free command either has its completion callback invoked or
 * has its sleeper woken via wakeup(); a command that is neither is
 * logged, since it would otherwise be leaked.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): iteration starts at 1, skipping slot 0 — presumably
	 * a reserved SMID; confirm against the command-array setup code. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * A command flagged SATA_ID_TIMEOUT owns a separately
		 * allocated data buffer that must be freed here, since no
		 * normal completion path will run for it.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Let pollers observe the command as complete. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Everything has been completed; nothing is outstanding any more. */
	sc->io_cmds_active = 0;
}
1133
/*
 * Restore driver state after a controller reinit (diag reset): freeze
 * CAM activity, announce a bus reset, force-complete all outstanding
 * commands, and invalidate every cached device handle so that post-reset
 * discovery can repopulate them.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Outstanding commands here indicate a cleanup problem. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1177
1178static void
1179mpssas_tm_timeout(void *data)
1180{
1181	struct mps_command *tm = data;
1182	struct mps_softc *sc = tm->cm_sc;
1183
1184	mtx_assert(&sc->mps_mtx, MA_OWNED);
1185
1186	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1187	    "task mgmt %p timed out\n", tm);
1188
1189	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1190	    ("command not inqueue\n"));
1191
1192	tm->cm_state = MPS_CM_STATE_BUSY;
1193	mps_reinit(sc);
1194}
1195
/*
 * Completion handler for a logical-unit-reset task management command.
 * If no commands remain queued for the LUN, the reset succeeded: CAM is
 * notified with AC_SENT_BDR and the TM command is either freed or reused
 * to abort the next timed-out command on the target.  If commands
 * remain, the reset is treated as failed and recovery escalates to a
 * target reset using the same TM command.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM has completed; disarm its timeout watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a bus-device-reset was delivered to this LUN. */
		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			/* Reuse this TM to abort the next timed-out command. */
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1295
1296static void
1297mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1298{
1299	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1300	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1301	struct mpssas_target *targ;
1302
1303	callout_stop(&tm->cm_callout);
1304
1305	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1306	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1307	targ = tm->cm_targ;
1308
1309	/*
1310	 * Currently there should be no way we can hit this case.  It only
1311	 * happens when we have a failure to allocate chain frames, and
1312	 * task management commands don't have S/G lists.
1313	 */
1314	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1315		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1316			   "This should not happen!\n", __func__, tm->cm_flags);
1317		mpssas_free_tm(sc, tm);
1318		return;
1319	}
1320
1321	if (reply == NULL) {
1322		mps_dprint(sc, MPS_RECOVERY,
1323		    "NULL target reset reply for tm %pi TaskMID %u\n",
1324		    tm, le16toh(req->TaskMID));
1325		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1326			/* this completion was due to a reset, just cleanup */
1327			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1328			    "reset, ignoring NULL target reset reply\n");
1329			targ->tm = NULL;
1330			mpssas_free_tm(sc, tm);
1331		} else {
1332			/* we should have gotten a reply. */
1333			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1334			    "target reset attempt, resetting controller\n");
1335			mps_reinit(sc);
1336		}
1337		return;
1338	}
1339
1340	mps_dprint(sc, MPS_RECOVERY,
1341	    "target reset status 0x%x code 0x%x count %u\n",
1342	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1343	    le32toh(reply->TerminationCount));
1344
1345	if (targ->outstanding == 0) {
1346		/* we've finished recovery for this target and all
1347		 * of its logical units.
1348		 */
1349		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1350		    "Finished reset recovery for target %u\n", targ->tid);
1351
1352		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1353		    CAM_LUN_WILDCARD);
1354
1355		targ->tm = NULL;
1356		mpssas_free_tm(sc, tm);
1357	} else {
1358		/*
1359		 * After a target reset, if this target still has
1360		 * outstanding commands, the reset effectively failed,
1361		 * regardless of the status reported.  escalate.
1362		 */
1363		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1364		    "Target reset complete for target %u, but still have %u "
1365		    "command(s), resetting controller\n", targ->tid,
1366		    targ->outstanding);
1367		mps_reinit(sc);
1368	}
1369}
1370
1371#define MPS_RESET_TIMEOUT 30
1372
/*
 * Issue a SCSI task-management reset (logical unit reset or target
 * reset) for the target associated with 'tm'.  Sets up the appropriate
 * completion handler for escalation, arms the MPS_RESET_TIMEOUT
 * watchdog, and maps/queues the command.  Returns 0 on successful
 * submission, -1 for an invalid target or reset type, or the error
 * from mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero devhandle means the target is gone; nothing to reset. */
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		/* On completion, escalate to target reset if needed. */
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		/* On completion, escalate to controller reinit if needed. */
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Arm the watchdog before submitting, so it cannot be missed. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1430
1431
/*
 * Completion handler for an abort-task task management command.  If the
 * target's timed-out command list is now empty, recovery is complete;
 * if the head of that list is a different command than the one just
 * aborted, the TM is reused to abort it; otherwise the abort is deemed
 * failed and recovery escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM has completed; disarm its timeout watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1513
1514#define MPS_ABORT_TIMEOUT 5
1515
1516static int
1517mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1518{
1519	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1520	struct mpssas_target *targ;
1521	int err;
1522
1523	targ = cm->cm_targ;
1524	if (targ->handle == 0) {
1525		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1526		    "%s null devhandle for target_id %d\n",
1527		    __func__, cm->cm_ccb->ccb_h.target_id);
1528		return -1;
1529	}
1530
1531	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1532	    "Aborting command %p\n", cm);
1533
1534	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1535	req->DevHandle = htole16(targ->handle);
1536	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1537
1538	/* XXX Need to handle invalid LUNs */
1539	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1540
1541	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1542
1543	tm->cm_data = NULL;
1544	tm->cm_complete = mpssas_abort_complete;
1545	tm->cm_complete_data = (void *)tm;
1546	tm->cm_targ = cm->cm_targ;
1547	tm->cm_lun = cm->cm_lun;
1548
1549	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1550	    mpssas_tm_timeout, tm);
1551
1552	targ->aborts++;
1553
1554	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1555
1556	err = mps_map_command(sc, tm);
1557	if (err)
1558		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1559		    "error %d sending abort for cm %p SMID %u\n",
1560		    err, cm, req->TaskMID);
1561	return err;
1562}
1563
1564static void
1565mpssas_scsiio_timeout(void *data)
1566{
1567	sbintime_t elapsed, now;
1568	union ccb *ccb;
1569	struct mps_softc *sc;
1570	struct mps_command *cm;
1571	struct mpssas_target *targ;
1572
1573	cm = (struct mps_command *)data;
1574	sc = cm->cm_sc;
1575	ccb = cm->cm_ccb;
1576	now = sbinuptime();
1577
1578	MPS_FUNCTRACE(sc);
1579	mtx_assert(&sc->mps_mtx, MA_OWNED);
1580
1581	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1582
1583	/*
1584	 * Run the interrupt handler to make sure it's not pending.  This
1585	 * isn't perfect because the command could have already completed
1586	 * and been re-used, though this is unlikely.
1587	 */
1588	mps_intr_locked(sc);
1589	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1590		mpssas_log_command(cm, MPS_XINFO,
1591		    "SCSI command %p almost timed out\n", cm);
1592		return;
1593	}
1594
1595	if (cm->cm_ccb == NULL) {
1596		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1597		return;
1598	}
1599
1600	targ = cm->cm_targ;
1601	targ->timeouts++;
1602
1603	elapsed = now - ccb->ccb_h.qos.sim_data;
1604	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1605	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1606	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1607	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1608
1609	/* XXX first, check the firmware state, to see if it's still
1610	 * operational.  if not, do a diag reset.
1611	 */
1612	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1613	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1614	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1615
1616	if (targ->tm != NULL) {
1617		/* target already in recovery, just queue up another
1618		 * timedout command to be processed later.
1619		 */
1620		mps_dprint(sc, MPS_RECOVERY,
1621		    "queued timedout cm %p for processing by tm %p\n",
1622		    cm, targ->tm);
1623	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1624		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1625		    "Sending abort to target %u for SMID %d\n", targ->tid,
1626		    cm->cm_desc.Default.SMID);
1627		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1628		    cm, targ->tm);
1629
1630		/* start recovery by aborting the first timedout command */
1631		mpssas_send_abort(sc, targ->tm, cm);
1632	} else {
1633		/* XXX queue this target up for recovery once a TM becomes
1634		 * available.  The firmware only has a limited number of
1635		 * HighPriority credits for the high priority requests used
1636		 * for task management, and we ran out.
1637		 *
1638		 * Isilon: don't worry about this for now, since we have
1639		 * more credits than disks in an enclosure, and limit
1640		 * ourselves to one TM per target for recovery.
1641		 */
1642		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1643		    "timedout cm %p failed to allocate a tm\n", cm);
1644	}
1645
1646}
1647
/*
 * Build and submit an MPI2 SCSI_IO request for an XPT_SCSI_IO CCB.
 * Validates the target, rejects I/O during removal/shutdown/reset,
 * translates CAM direction and tag flags into MPI control bits, copies
 * the CDB, applies EEDP (protection information) setup when enabled,
 * arms the per-command timeout, and maps the command for DMA.  The CCB
 * is completed here only on the early-error paths; otherwise completion
 * happens in mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means no device is present at this target ID. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members may not be addressed directly. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command, or a diag reset is in flight: freeze the simq
	 * and ask CAM to requeue the I/O once the queue is released.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI_IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Apply the per-target Transport Layer Retry setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already assigned identically above;
	 * this reassignment is redundant but harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN entry for this I/O, if we know about it. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping extracts the data buffer from the ccb. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	/* NOTE(review): the condition below attempts direct I/O when field
	 * == MPS_WD_RETRY, which appears inverted relative to the comment
	 * above — confirm the intended semantics of MPS_WD_RETRY. */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submission time for the timeout handler's elapsed log. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1925
1926/**
1927 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1928 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
	const char *desc_ioc_state, *desc_scsi_status;

	/* NOTE(review): LogInfo 0x31170000 is silently suppressed —
	 * presumably a benign/expected firmware code; confirm against the
	 * LSI/Avago loginfo documentation. */
	if (log_info == 0x31170000)
		return;

	/* Look up printable names for the IOC and SCSI status codes. */
	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
	    ioc_status);
	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
	    scsi_status);

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);

	/*
	 *We can add more detail about underflow data here
	 * TO-DO
	 */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state %b\n", desc_scsi_status, scsi_status,
	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");

	/* Dump the autosense data when verbose debugging is enabled. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode the response code carried in the reply's ResponseInfo. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
		    response_bytes[0],
		    mps_describe_table(mps_scsi_taskmgmt_string,
		    response_bytes[0]));
	}
}
1978
/*
 * Completion handler for XPT_SCSI_IO commands.  Tears down the command's
 * DMA mapping, updates per-target accounting, translates the firmware's
 * SCSI IO reply (if any) into a CAM ccb status, and hands the CCB back to
 * CAM.  Called with the MPS softc mutex held (asserted below).
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command has come back; cancel its submit-time timeout callout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/*
	 * Update per-target accounting and take the command off the
	 * target's outstanding-command queue.
	 */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Log completions that race with error recovery or follow a
	 * diagnostic reset; a timed-out command also comes off the
	 * target's recovery queue here.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the IOC status in the reply into a CAM ccb status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy as much sense data as fits the CCB's buffer. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		/*
		 * NOTE(review): unlike the TLR check above, this dereferences
		 * csio->data_ptr without checking CAM_DATA_VADDR or NULL --
		 * confirm INQUIRY completions always carry a mapped virtual
		 * data buffer here.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Decode reply details; only produces output at the XINFO level. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2416
2417/* All Request reached here are Endian safe */
2418static void
2419mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2420    union ccb *ccb) {
2421	pMpi2SCSIIORequest_t	pIO_req;
2422	struct mps_softc	*sc = sassc->sc;
2423	uint64_t		virtLBA;
2424	uint32_t		physLBA, stripe_offset, stripe_unit;
2425	uint32_t		io_size, column;
2426	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2427
2428	/*
2429	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2430	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2431	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2432	 * bit different than the 10/16 CDBs, handle them separately.
2433	 */
2434	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2435	CDB = pIO_req->CDB.CDB32;
2436
2437	/*
2438	 * Handle 6 byte CDBs.
2439	 */
2440	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2441	    (CDB[0] == WRITE_6))) {
2442		/*
2443		 * Get the transfer size in blocks.
2444		 */
2445		io_size = (cm->cm_length >> sc->DD_block_exponent);
2446
2447		/*
2448		 * Get virtual LBA given in the CDB.
2449		 */
2450		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2451		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2452
2453		/*
2454		 * Check that LBA range for I/O does not exceed volume's
2455		 * MaxLBA.
2456		 */
2457		if ((virtLBA + (uint64_t)io_size - 1) <=
2458		    sc->DD_max_lba) {
2459			/*
2460			 * Check if the I/O crosses a stripe boundary.  If not,
2461			 * translate the virtual LBA to a physical LBA and set
2462			 * the DevHandle for the PhysDisk to be used.  If it
2463			 * does cross a boundary, do normal I/O.  To get the
2464			 * right DevHandle to use, get the map number for the
2465			 * column, then use that map number to look up the
2466			 * DevHandle of the PhysDisk.
2467			 */
2468			stripe_offset = (uint32_t)virtLBA &
2469			    (sc->DD_stripe_size - 1);
2470			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2471				physLBA = (uint32_t)virtLBA >>
2472				    sc->DD_stripe_exponent;
2473				stripe_unit = physLBA / sc->DD_num_phys_disks;
2474				column = physLBA % sc->DD_num_phys_disks;
2475				pIO_req->DevHandle =
2476				    htole16(sc->DD_column_map[column].dev_handle);
2477				/* ???? Is this endian safe*/
2478				cm->cm_desc.SCSIIO.DevHandle =
2479				    pIO_req->DevHandle;
2480
2481				physLBA = (stripe_unit <<
2482				    sc->DD_stripe_exponent) + stripe_offset;
2483				ptrLBA = &pIO_req->CDB.CDB32[1];
2484				physLBA_byte = (uint8_t)(physLBA >> 16);
2485				*ptrLBA = physLBA_byte;
2486				ptrLBA = &pIO_req->CDB.CDB32[2];
2487				physLBA_byte = (uint8_t)(physLBA >> 8);
2488				*ptrLBA = physLBA_byte;
2489				ptrLBA = &pIO_req->CDB.CDB32[3];
2490				physLBA_byte = (uint8_t)physLBA;
2491				*ptrLBA = physLBA_byte;
2492
2493				/*
2494				 * Set flag that Direct Drive I/O is
2495				 * being done.
2496				 */
2497				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2498			}
2499		}
2500		return;
2501	}
2502
2503	/*
2504	 * Handle 10, 12 or 16 byte CDBs.
2505	 */
2506	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2507	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2508	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2509	    (CDB[0] == WRITE_12))) {
2510		/*
2511		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2512		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2513		 * the else section.  10-byte and 12-byte CDB's are OK.
2514		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2515		 * ready to accept 12byte CDB for Direct IOs.
2516		 */
2517		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2518		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2519		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2520			/*
2521			 * Get the transfer size in blocks.
2522			 */
2523			io_size = (cm->cm_length >> sc->DD_block_exponent);
2524
2525			/*
2526			 * Get virtual LBA.  Point to correct lower 4 bytes of
2527			 * LBA in the CDB depending on command.
2528			 */
2529			lba_idx = ((CDB[0] == READ_12) ||
2530				(CDB[0] == WRITE_12) ||
2531				(CDB[0] == READ_10) ||
2532				(CDB[0] == WRITE_10))? 2 : 6;
2533			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2534			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2535			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2536			    (uint64_t)CDB[lba_idx + 3];
2537
2538			/*
2539			 * Check that LBA range for I/O does not exceed volume's
2540			 * MaxLBA.
2541			 */
2542			if ((virtLBA + (uint64_t)io_size - 1) <=
2543			    sc->DD_max_lba) {
2544				/*
2545				 * Check if the I/O crosses a stripe boundary.
2546				 * If not, translate the virtual LBA to a
2547				 * physical LBA and set the DevHandle for the
2548				 * PhysDisk to be used.  If it does cross a
2549				 * boundary, do normal I/O.  To get the right
2550				 * DevHandle to use, get the map number for the
2551				 * column, then use that map number to look up
2552				 * the DevHandle of the PhysDisk.
2553				 */
2554				stripe_offset = (uint32_t)virtLBA &
2555				    (sc->DD_stripe_size - 1);
2556				if ((stripe_offset + io_size) <=
2557				    sc->DD_stripe_size) {
2558					physLBA = (uint32_t)virtLBA >>
2559					    sc->DD_stripe_exponent;
2560					stripe_unit = physLBA /
2561					    sc->DD_num_phys_disks;
2562					column = physLBA %
2563					    sc->DD_num_phys_disks;
2564					pIO_req->DevHandle =
2565					    htole16(sc->DD_column_map[column].
2566					    dev_handle);
2567					cm->cm_desc.SCSIIO.DevHandle =
2568					    pIO_req->DevHandle;
2569
2570					physLBA = (stripe_unit <<
2571					    sc->DD_stripe_exponent) +
2572					    stripe_offset;
2573					ptrLBA =
2574					    &pIO_req->CDB.CDB32[lba_idx];
2575					physLBA_byte = (uint8_t)(physLBA >> 24);
2576					*ptrLBA = physLBA_byte;
2577					ptrLBA =
2578					    &pIO_req->CDB.CDB32[lba_idx + 1];
2579					physLBA_byte = (uint8_t)(physLBA >> 16);
2580					*ptrLBA = physLBA_byte;
2581					ptrLBA =
2582					    &pIO_req->CDB.CDB32[lba_idx + 2];
2583					physLBA_byte = (uint8_t)(physLBA >> 8);
2584					*ptrLBA = physLBA_byte;
2585					ptrLBA =
2586					    &pIO_req->CDB.CDB32[lba_idx + 3];
2587					physLBA_byte = (uint8_t)physLBA;
2588					*ptrLBA = physLBA_byte;
2589
2590					/*
2591					 * Set flag that Direct Drive I/O is
2592					 * being done.
2593					 */
2594					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2595				}
2596			}
2597		} else {
2598			/*
2599			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2600			 * 0.  Get the transfer size in blocks.
2601			 */
2602			io_size = (cm->cm_length >> sc->DD_block_exponent);
2603
2604			/*
2605			 * Get virtual LBA.
2606			 */
2607			virtLBA = ((uint64_t)CDB[2] << 54) |
2608			    ((uint64_t)CDB[3] << 48) |
2609			    ((uint64_t)CDB[4] << 40) |
2610			    ((uint64_t)CDB[5] << 32) |
2611			    ((uint64_t)CDB[6] << 24) |
2612			    ((uint64_t)CDB[7] << 16) |
2613			    ((uint64_t)CDB[8] << 8) |
2614			    (uint64_t)CDB[9];
2615
2616			/*
2617			 * Check that LBA range for I/O does not exceed volume's
2618			 * MaxLBA.
2619			 */
2620			if ((virtLBA + (uint64_t)io_size - 1) <=
2621			    sc->DD_max_lba) {
2622				/*
2623				 * Check if the I/O crosses a stripe boundary.
2624				 * If not, translate the virtual LBA to a
2625				 * physical LBA and set the DevHandle for the
2626				 * PhysDisk to be used.  If it does cross a
2627				 * boundary, do normal I/O.  To get the right
2628				 * DevHandle to use, get the map number for the
2629				 * column, then use that map number to look up
2630				 * the DevHandle of the PhysDisk.
2631				 */
2632				stripe_offset = (uint32_t)virtLBA &
2633				    (sc->DD_stripe_size - 1);
2634				if ((stripe_offset + io_size) <=
2635				    sc->DD_stripe_size) {
2636					physLBA = (uint32_t)(virtLBA >>
2637					    sc->DD_stripe_exponent);
2638					stripe_unit = physLBA /
2639					    sc->DD_num_phys_disks;
2640					column = physLBA %
2641					    sc->DD_num_phys_disks;
2642					pIO_req->DevHandle =
2643					    htole16(sc->DD_column_map[column].
2644					    dev_handle);
2645					cm->cm_desc.SCSIIO.DevHandle =
2646					    pIO_req->DevHandle;
2647
2648					physLBA = (stripe_unit <<
2649					    sc->DD_stripe_exponent) +
2650					    stripe_offset;
2651
2652					/*
2653					 * Set upper 4 bytes of LBA to 0.  We
2654					 * assume that the phys disks are less
2655					 * than 2 TB's in size.  Then, set the
2656					 * lower 4 bytes.
2657					 */
2658					pIO_req->CDB.CDB32[2] = 0;
2659					pIO_req->CDB.CDB32[3] = 0;
2660					pIO_req->CDB.CDB32[4] = 0;
2661					pIO_req->CDB.CDB32[5] = 0;
2662					ptrLBA = &pIO_req->CDB.CDB32[6];
2663					physLBA_byte = (uint8_t)(physLBA >> 24);
2664					*ptrLBA = physLBA_byte;
2665					ptrLBA = &pIO_req->CDB.CDB32[7];
2666					physLBA_byte = (uint8_t)(physLBA >> 16);
2667					*ptrLBA = physLBA_byte;
2668					ptrLBA = &pIO_req->CDB.CDB32[8];
2669					physLBA_byte = (uint8_t)(physLBA >> 8);
2670					*ptrLBA = physLBA_byte;
2671					ptrLBA = &pIO_req->CDB.CDB32[9];
2672					physLBA_byte = (uint8_t)physLBA;
2673					*ptrLBA = physLBA_byte;
2674
2675					/*
2676					 * Set flag that Direct Drive I/O is
2677					 * being done.
2678					 */
2679					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2680				}
2681			}
2682		}
2683	}
2684}
2685
2686static void
2687mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2688{
2689	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2690	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2691	uint64_t sasaddr;
2692	union ccb *ccb;
2693
2694	ccb = cm->cm_complete_data;
2695
2696	/*
2697	 * Currently there should be no way we can hit this case.  It only
2698	 * happens when we have a failure to allocate chain frames, and SMP
2699	 * commands require two S/G elements only.  That should be handled
2700	 * in the standard request size.
2701	 */
2702	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2703		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2704			   __func__, cm->cm_flags);
2705		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2706		goto bailout;
2707        }
2708
2709	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2710	if (rpl == NULL) {
2711		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2712		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2713		goto bailout;
2714	}
2715
2716	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2717	sasaddr = le32toh(req->SASAddress.Low);
2718	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2719
2720	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2721	    MPI2_IOCSTATUS_SUCCESS ||
2722	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2723		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2724		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2725		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2726		goto bailout;
2727	}
2728
2729	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2730		   "%#jx completed successfully\n", __func__,
2731		   (uintmax_t)sasaddr);
2732
2733	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2734		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2735	else
2736		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2737
2738bailout:
2739	/*
2740	 * We sync in both directions because we had DMAs in the S/G list
2741	 * in both directions.
2742	 */
2743	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2744			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2745	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2746	mps_free_command(sc, cm);
2747	xpt_done(ccb);
2748}
2749
2750static void
2751mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2752{
2753	struct mps_command *cm;
2754	uint8_t *request, *response;
2755	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2756	struct mps_softc *sc;
2757	int error;
2758
2759	sc = sassc->sc;
2760	error = 0;
2761
2762	/*
2763	 * XXX We don't yet support physical addresses here.
2764	 */
2765	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2766	case CAM_DATA_PADDR:
2767	case CAM_DATA_SG_PADDR:
2768		mps_dprint(sc, MPS_ERROR,
2769			   "%s: physical addresses not supported\n", __func__);
2770		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2771		xpt_done(ccb);
2772		return;
2773	case CAM_DATA_SG:
2774		/*
2775		 * The chip does not support more than one buffer for the
2776		 * request or response.
2777		 */
2778	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2779		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2780			mps_dprint(sc, MPS_ERROR,
2781				   "%s: multiple request or response "
2782				   "buffer segments not supported for SMP\n",
2783				   __func__);
2784			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2785			xpt_done(ccb);
2786			return;
2787		}
2788
2789		/*
2790		 * The CAM_SCATTER_VALID flag was originally implemented
2791		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2792		 * We have two.  So, just take that flag to mean that we
2793		 * might have S/G lists, and look at the S/G segment count
2794		 * to figure out whether that is the case for each individual
2795		 * buffer.
2796		 */
2797		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2798			bus_dma_segment_t *req_sg;
2799
2800			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2801			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2802		} else
2803			request = ccb->smpio.smp_request;
2804
2805		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2806			bus_dma_segment_t *rsp_sg;
2807
2808			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2809			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2810		} else
2811			response = ccb->smpio.smp_response;
2812		break;
2813	case CAM_DATA_VADDR:
2814		request = ccb->smpio.smp_request;
2815		response = ccb->smpio.smp_response;
2816		break;
2817	default:
2818		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2819		xpt_done(ccb);
2820		return;
2821	}
2822
2823	cm = mps_alloc_command(sc);
2824	if (cm == NULL) {
2825		mps_dprint(sc, MPS_ERROR,
2826		    "%s: cannot allocate command\n", __func__);
2827		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2828		xpt_done(ccb);
2829		return;
2830	}
2831
2832	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2833	bzero(req, sizeof(*req));
2834	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2835
2836	/* Allow the chip to use any route to this SAS address. */
2837	req->PhysicalPort = 0xff;
2838
2839	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2840	req->SGLFlags =
2841	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2842
2843	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2844	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2845
2846	mpi_init_sge(cm, req, &req->SGL);
2847
2848	/*
2849	 * Set up a uio to pass into mps_map_command().  This allows us to
2850	 * do one map command, and one busdma call in there.
2851	 */
2852	cm->cm_uio.uio_iov = cm->cm_iovec;
2853	cm->cm_uio.uio_iovcnt = 2;
2854	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2855
2856	/*
2857	 * The read/write flag isn't used by busdma, but set it just in
2858	 * case.  This isn't exactly accurate, either, since we're going in
2859	 * both directions.
2860	 */
2861	cm->cm_uio.uio_rw = UIO_WRITE;
2862
2863	cm->cm_iovec[0].iov_base = request;
2864	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2865	cm->cm_iovec[1].iov_base = response;
2866	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2867
2868	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2869			       cm->cm_iovec[1].iov_len;
2870
2871	/*
2872	 * Trigger a warning message in mps_data_cb() for the user if we
2873	 * wind up exceeding two S/G segments.  The chip expects one
2874	 * segment for the request and another for the response.
2875	 */
2876	cm->cm_max_segs = 2;
2877
2878	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2879	cm->cm_complete = mpssas_smpio_complete;
2880	cm->cm_complete_data = ccb;
2881
2882	/*
2883	 * Tell the mapping code that we're using a uio, and that this is
2884	 * an SMP passthrough request.  There is a little special-case
2885	 * logic there (in mps_data_cb()) to handle the bidirectional
2886	 * transfer.
2887	 */
2888	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2889			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2890
2891	/* The chip data format is little endian. */
2892	req->SASAddress.High = htole32(sasaddr >> 32);
2893	req->SASAddress.Low = htole32(sasaddr);
2894
2895	/*
2896	 * XXX Note that we don't have a timeout/abort mechanism here.
2897	 * From the manual, it looks like task management requests only
2898	 * work for SCSI IO and SATA passthrough requests.  We may need to
2899	 * have a mechanism to retry requests in the event of a chip reset
2900	 * at least.  Hopefully the chip will insure that any errors short
2901	 * of that are relayed back to the driver.
2902	 */
2903	error = mps_map_command(sc, cm);
2904	if ((error != 0) && (error != EINPROGRESS)) {
2905		mps_dprint(sc, MPS_ERROR,
2906			   "%s: error %d returned from mps_map_command()\n",
2907			   __func__, error);
2908		goto bailout_error;
2909	}
2910
2911	return;
2912
2913bailout_error:
2914	mps_free_command(sc, cm);
2915	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2916	xpt_done(ccb);
2917	return;
2918
2919}
2920
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * to talk to (the device itself if it contains an SMP target, otherwise
 * the device's parent, which is normally the attached expander) and hand
 * the CCB to mpssas_send_smpcmd().  On any failure the CCB is completed
 * here with an appropriate error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to go figure out what the parent
	 * expander's SAS address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		/* The parent must itself contain an SMP target. */
		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * Use the parent device information cached on the target
		 * at discovery time rather than looking the parent up by
		 * handle.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3056
3057static void
3058mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3059{
3060	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3061	struct mps_softc *sc;
3062	struct mps_command *tm;
3063	struct mpssas_target *targ;
3064
3065	MPS_FUNCTRACE(sassc->sc);
3066	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3067
3068	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3069	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3070	     ccb->ccb_h.target_id));
3071	sc = sassc->sc;
3072	tm = mpssas_alloc_tm(sc);
3073	if (tm == NULL) {
3074		mps_dprint(sc, MPS_ERROR,
3075		    "command alloc failure in mpssas_action_resetdev\n");
3076		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3077		xpt_done(ccb);
3078		return;
3079	}
3080
3081	targ = &sassc->targets[ccb->ccb_h.target_id];
3082	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3083	req->DevHandle = htole16(targ->handle);
3084	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3085
3086	/* SAS Hard Link Reset / SATA Link Reset */
3087	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3088
3089	tm->cm_data = NULL;
3090	tm->cm_complete = mpssas_resetdev_complete;
3091	tm->cm_complete_data = ccb;
3092	tm->cm_targ = targ;
3093
3094	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3095	mps_map_command(sc, tm);
3096}
3097
3098static void
3099mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3100{
3101	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3102	union ccb *ccb;
3103
3104	MPS_FUNCTRACE(sc);
3105	mtx_assert(&sc->mps_mtx, MA_OWNED);
3106
3107	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3108	ccb = tm->cm_complete_data;
3109
3110	/*
3111	 * Currently there should be no way we can hit this case.  It only
3112	 * happens when we have a failure to allocate chain frames, and
3113	 * task management commands don't have S/G lists.
3114	 */
3115	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3116		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3117
3118		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3119
3120		mps_dprint(sc, MPS_ERROR,
3121			   "%s: cm_flags = %#x for reset of handle %#04x! "
3122			   "This should not happen!\n", __func__, tm->cm_flags,
3123			   req->DevHandle);
3124		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3125		goto bailout;
3126	}
3127
3128	mps_dprint(sc, MPS_XINFO,
3129	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3130	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3131
3132	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3133		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3134		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3135		    CAM_LUN_WILDCARD);
3136	}
3137	else
3138		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3139
3140bailout:
3141
3142	mpssas_free_tm(sc, tm);
3143	xpt_done(ccb);
3144}
3145
3146static void
3147mpssas_poll(struct cam_sim *sim)
3148{
3149	struct mpssas_softc *sassc;
3150
3151	sassc = cam_sim_softc(sim);
3152
3153	if (sassc->sc->mps_debug & MPS_TRACE) {
3154		/* frequent debug messages during a panic just slow
3155		 * everything down too much.
3156		 */
3157		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3158		sassc->sc->mps_debug &= ~MPS_TRACE;
3159	}
3160
3161	mps_intr_locked(sassc->sc);
3162}
3163
/*
 * CAM asynchronous event callback.  The only event handled is
 * AC_ADVINFO_CHANGED for long read capacity data: when that changes,
 * re-fetch the cached data from the transport layer and record the
 * LUN's EEDP (protection information) format so later I/O can be built
 * accordingly.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* See whether we already track this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First sighting of this LUN: allocate a record for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Issue an XPT_DEV_ADVINFO CCB to pull the cached long
		 * read capacity data for this path from the transport
		 * layer.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Mark the LUN as EEDP-formatted only for protection
		 * types 1 and 3; type 2 (or anything unrecognized) is
		 * treated as unformatted.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
3263
3264/*
3265 * Set the INRESET flag for this target so that no I/O will be sent to
3266 * the target until the reset has completed.  If an I/O request does
3267 * happen, the devq will be frozen.  The CCB holds the path which is
3268 * used to release the devq.  The devq is released and the CCB is freed
3269 * when the TM completes.
3270 */
3271void
3272mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3273    struct mpssas_target *target, lun_id_t lun_id)
3274{
3275	union ccb *ccb;
3276	path_id_t path_id;
3277
3278	ccb = xpt_alloc_ccb_nowait();
3279	if (ccb) {
3280		path_id = cam_sim_path(sc->sassc->sim);
3281		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3282		    target->tid, lun_id) != CAM_REQ_CMP) {
3283			xpt_free_ccb(ccb);
3284		} else {
3285			tm->cm_ccb = ccb;
3286			tm->cm_targ = target;
3287			target->flags |= MPSSAS_TARGET_INRESET;
3288		}
3289	}
3290}
3291
3292int
3293mpssas_startup(struct mps_softc *sc)
3294{
3295
3296	/*
3297	 * Send the port enable message and set the wait_for_port_enable flag.
3298	 * This flag helps to keep the simq frozen until all discovery events
3299	 * are processed.
3300	 */
3301	sc->wait_for_port_enable = 1;
3302	mpssas_send_portenable(sc);
3303	return (0);
3304}
3305
3306static int
3307mpssas_send_portenable(struct mps_softc *sc)
3308{
3309	MPI2_PORT_ENABLE_REQUEST *request;
3310	struct mps_command *cm;
3311
3312	MPS_FUNCTRACE(sc);
3313
3314	if ((cm = mps_alloc_command(sc)) == NULL)
3315		return (EBUSY);
3316	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3317	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3318	request->MsgFlags = 0;
3319	request->VP_ID = 0;
3320	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3321	cm->cm_complete = mpssas_portenable_complete;
3322	cm->cm_data = NULL;
3323	cm->cm_sge = NULL;
3324
3325	mps_map_command(sc, cm);
3326	mps_dprint(sc, MPS_XINFO,
3327	    "mps_send_portenable finished cm %p req %p complete %p\n",
3328	    cm, cm->cm_req, cm->cm_complete);
3329	return (0);
3330}
3331
3332static void
3333mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3334{
3335	MPI2_PORT_ENABLE_REPLY *reply;
3336	struct mpssas_softc *sassc;
3337
3338	MPS_FUNCTRACE(sc);
3339	sassc = sc->sassc;
3340
3341	/*
3342	 * Currently there should be no way we can hit this case.  It only
3343	 * happens when we have a failure to allocate chain frames, and
3344	 * port enable commands don't have S/G lists.
3345	 */
3346	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3347		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3348			   "This should not happen!\n", __func__, cm->cm_flags);
3349	}
3350
3351	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3352	if (reply == NULL)
3353		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3354	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3355	    MPI2_IOCSTATUS_SUCCESS)
3356		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3357
3358	mps_free_command(sc, cm);
3359
3360	/*
3361	 * Get WarpDrive info after discovery is complete but before the scan
3362	 * starts.  At this point, all devices are ready to be exposed to the
3363	 * OS.  If devices should be hidden instead, take them out of the
3364	 * 'targets' array before the scan.  The devinfo for a disk will have
3365	 * some info and a volume's will be 0.  Use that to remove disks.
3366	 */
3367	mps_wd_config_pages(sc);
3368
3369	/*
3370	 * Done waiting for port enable to complete.  Decrement the refcount.
3371	 * If refcount is 0, discovery is complete and a rescan of the bus can
3372	 * take place.  Since the simq was explicitly frozen before port
3373	 * enable, it must be explicitly released here to keep the
3374	 * freeze/release count in sync.
3375	 */
3376	sc->wait_for_port_enable = 0;
3377	sc->port_enable_complete = 1;
3378	wakeup(&sc->port_enable_complete);
3379	mpssas_startup_decrement(sassc);
3380}
3381
3382int
3383mpssas_check_id(struct mpssas_softc *sassc, int id)
3384{
3385	struct mps_softc *sc = sassc->sc;
3386	char *ids;
3387	char *name;
3388
3389	ids = &sc->exclude_ids[0];
3390	while((name = strsep(&ids, ",")) != NULL) {
3391		if (name[0] == '\0')
3392			continue;
3393		if (strtol(name, NULL, 0) == (long)id)
3394			return (1);
3395	}
3396
3397	return (0);
3398}
3399
3400void
3401mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3402{
3403	struct mpssas_softc *sassc;
3404	struct mpssas_lun *lun, *lun_tmp;
3405	struct mpssas_target *targ;
3406	int i;
3407
3408	sassc = sc->sassc;
3409	/*
3410	 * The number of targets is based on IOC Facts, so free all of
3411	 * the allocated LUNs for each target and then the target buffer
3412	 * itself.
3413	 */
3414	for (i=0; i< maxtargets; i++) {
3415		targ = &sassc->targets[i];
3416		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3417			free(lun, M_MPT2);
3418		}
3419	}
3420	free(sassc->targets, M_MPT2);
3421
3422	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3423	    M_MPT2, M_WAITOK|M_ZERO);
3424	if (!sassc->targets) {
3425		panic("%s failed to alloc targets with error %d\n",
3426		    __func__, ENOMEM);
3427	}
3428}
3429