mps_sas.c revision 1537078d8f2e62e82de3c08bdcae0fd79dc35a4a
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31 *
32 * $FreeBSD$
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38/* Communications core for Avago Technologies (LSI) MPT2 */
39
40/* TODO Move headers to mpsvar */
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/selinfo.h>
46#include <sys/module.h>
47#include <sys/bus.h>
48#include <sys/conf.h>
49#include <sys/bio.h>
50#include <sys/malloc.h>
51#include <sys/uio.h>
52#include <sys/sysctl.h>
53#include <sys/endian.h>
54#include <sys/queue.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57#include <sys/sbuf.h>
58
59#include <machine/bus.h>
60#include <machine/resource.h>
61#include <sys/rman.h>
62
63#include <machine/stdarg.h>
64
65#include <cam/cam.h>
66#include <cam/cam_ccb.h>
67#include <cam/cam_xpt.h>
68#include <cam/cam_debug.h>
69#include <cam/cam_sim.h>
70#include <cam/cam_xpt_sim.h>
71#include <cam/cam_xpt_periph.h>
72#include <cam/cam_periph.h>
73#include <cam/scsi/scsi_all.h>
74#include <cam/scsi/scsi_message.h>
75#if __FreeBSD_version >= 900026
76#include <cam/scsi/smp_all.h>
77#endif
78
79#include <dev/mps/mpi/mpi2_type.h>
80#include <dev/mps/mpi/mpi2.h>
81#include <dev/mps/mpi/mpi2_ioc.h>
82#include <dev/mps/mpi/mpi2_sas.h>
83#include <dev/mps/mpi/mpi2_cnfg.h>
84#include <dev/mps/mpi/mpi2_init.h>
85#include <dev/mps/mpi/mpi2_tool.h>
86#include <dev/mps/mps_ioctl.h>
87#include <dev/mps/mpsvar.h>
88#include <dev/mps/mps_table.h>
89#include <dev/mps/mps_sas.h>
90
91#define MPSSAS_DISCOVERY_TIMEOUT	20
92#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93
94/*
95 * static array to check SCSI OpCode for EEDP protection bits
96 */
97#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 - 0x0F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 - 0x1F */
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 - 0x3F */
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 - 0x7F */
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0 - 0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0		/* 0xF0 - 0xFF */
};
118
119MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
120
121static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124static void mpssas_poll(struct cam_sim *sim);
125static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126    struct mps_command *cm);
127static void mpssas_scsiio_timeout(void *data);
128static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130    struct mps_command *cm, union ccb *ccb);
131static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134#if __FreeBSD_version >= 900026
135static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
137			       uint64_t sasaddr);
138static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139#endif //FreeBSD_version >= 900026
140static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141static void mpssas_async(void *callback_arg, uint32_t code,
142			 struct cam_path *path, void *arg);
143#if (__FreeBSD_version < 901503) || \
144    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146			      struct ccb_getdev *cgd);
147static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
148#endif
149static int mpssas_send_portenable(struct mps_softc *sc);
150static void mpssas_portenable_complete(struct mps_softc *sc,
151    struct mps_command *cm);
152
153struct mpssas_target *
154mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
155{
156	struct mpssas_target *target;
157	int i;
158
159	for (i = start; i < sassc->maxtargets; i++) {
160		target = &sassc->targets[i];
161		if (target->handle == handle)
162			return (target);
163	}
164
165	return (NULL);
166}
167
168/* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery.  Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
174 */
175void
176mpssas_startup_increment(struct mpssas_softc *sassc)
177{
178	MPS_FUNCTRACE(sassc->sc);
179
180	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
181		if (sassc->startup_refcount++ == 0) {
182			/* just starting, freeze the simq */
183			mps_dprint(sassc->sc, MPS_INIT,
184			    "%s freezing simq\n", __func__);
185#if __FreeBSD_version >= 1000039
186			xpt_hold_boot();
187#endif
188			xpt_freeze_simq(sassc->sim, 1);
189		}
190		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
191		    sassc->startup_refcount);
192	}
193}
194
195void
196mpssas_release_simq_reinit(struct mpssas_softc *sassc)
197{
198	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200		xpt_release_simq(sassc->sim, 1);
201		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
202	}
203}
204
205void
206mpssas_startup_decrement(struct mpssas_softc *sassc)
207{
208	MPS_FUNCTRACE(sassc->sc);
209
210	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
211		if (--sassc->startup_refcount == 0) {
212			/* finished all discovery-related actions, release
213			 * the simq and rescan for the latest topology.
214			 */
215			mps_dprint(sassc->sc, MPS_INIT,
216			    "%s releasing simq\n", __func__);
217			sassc->flags &= ~MPSSAS_IN_STARTUP;
218			xpt_release_simq(sassc->sim, 1);
219#if __FreeBSD_version >= 1000039
220			xpt_release_boot();
221#else
222			mpssas_rescan_target(sassc->sc, NULL);
223#endif
224		}
225		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
226		    sassc->startup_refcount);
227	}
228}
229
230/* The firmware requires us to stop sending commands when we're doing task
231 * management, so refcount the TMs and keep the simq frozen when any are in
232 * use.
233 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	/* Task management frames come from the high-priority pool. */
	return (mps_alloc_high_priority_command(sc));
}
242
243void
244mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
245{
246	int target_id = 0xFFFFFFFF;
247
248	if (tm == NULL)
249		return;
250
251	/*
252	 * For TM's the devq is frozen for the device.  Unfreeze it here and
253	 * free the resources used for freezing the devq.  Must clear the
254	 * INRESET flag as well or scsi I/O will not work.
255	 */
256	if (tm->cm_targ != NULL) {
257		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
258		target_id = tm->cm_targ->tid;
259	}
260	if (tm->cm_ccb) {
261		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
262		    target_id);
263		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
264		xpt_free_path(tm->cm_ccb->ccb_h.path);
265		xpt_free_ccb(tm->cm_ccb);
266	}
267
268	mps_free_high_priority_command(sc, tm);
269}
270
271void
272mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
273{
274	struct mpssas_softc *sassc = sc->sassc;
275	path_id_t pathid;
276	target_id_t targetid;
277	union ccb *ccb;
278
279	MPS_FUNCTRACE(sc);
280	pathid = cam_sim_path(sassc->sim);
281	if (targ == NULL)
282		targetid = CAM_TARGET_WILDCARD;
283	else
284		targetid = targ - sassc->targets;
285
286	/*
287	 * Allocate a CCB and schedule a rescan.
288	 */
289	ccb = xpt_alloc_ccb_nowait();
290	if (ccb == NULL) {
291		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
292		return;
293	}
294
295	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
298		xpt_free_ccb(ccb);
299		return;
300	}
301
302	if (targetid == CAM_TARGET_WILDCARD)
303		ccb->ccb_h.func_code = XPT_SCAN_BUS;
304	else
305		ccb->ccb_h.func_code = XPT_SCAN_TGT;
306
307	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
308	xpt_rescan(ccb);
309}
310
/*
 * Emit a formatted debug message for a command.  When the command has a
 * CCB attached, the message is prefixed with its CAM path (and, for SCSI
 * I/O, the CDB string and transfer length); otherwise a "noperiph"
 * sim/bus/target/lun tuple is used.  Output is suppressed unless 'level'
 * is enabled in the adapter's mps_debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];		/* fixed buffer; longer output is truncated */
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim unit and target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
356
357
358static void
359mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
360{
361	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362	struct mpssas_target *targ;
363	uint16_t handle;
364
365	MPS_FUNCTRACE(sc);
366
367	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
369	targ = tm->cm_targ;
370
371	if (reply == NULL) {
372		/* XXX retry the remove after the diag reset completes? */
373		mps_dprint(sc, MPS_FAULT,
374		    "%s NULL reply resetting device 0x%04x\n", __func__,
375		    handle);
376		mpssas_free_tm(sc, tm);
377		return;
378	}
379
380	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
381	    MPI2_IOCSTATUS_SUCCESS) {
382		mps_dprint(sc, MPS_ERROR,
383		   "IOCStatus = 0x%x while resetting device 0x%x\n",
384		   le16toh(reply->IOCStatus), handle);
385	}
386
387	mps_dprint(sc, MPS_XINFO,
388	    "Reset aborted %u commands\n", reply->TerminationCount);
389	mps_free_reply(sc, tm->cm_reply_data);
390	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391
392	mps_dprint(sc, MPS_XINFO,
393	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
394
395	/*
396	 * Don't clear target if remove fails because things will get confusing.
397	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398	 * this target id if possible, and so we can assign the same target id
399	 * to this device if it comes back in the future.
400	 */
401	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402	    MPI2_IOCSTATUS_SUCCESS) {
403		targ = tm->cm_targ;
404		targ->handle = 0x0;
405		targ->encl_handle = 0x0;
406		targ->encl_slot = 0x0;
407		targ->exp_dev_handle = 0x0;
408		targ->phy_num = 0x0;
409		targ->linkrate = 0x0;
410		targ->devinfo = 0x0;
411		targ->flags = 0x0;
412	}
413
414	mpssas_free_tm(sc, tm);
415}
416
417
418/*
419 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
420 * Otherwise Volume Delete is same as Bare Drive Removal.
421 */
422void
423mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
424{
425	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
426	struct mps_softc *sc;
427	struct mps_command *cm;
428	struct mpssas_target *targ = NULL;
429
430	MPS_FUNCTRACE(sassc->sc);
431	sc = sassc->sc;
432
433#ifdef WD_SUPPORT
434	/*
435	 * If this is a WD controller, determine if the disk should be exposed
436	 * to the OS or not.  If disk should be exposed, return from this
437	 * function without doing anything.
438	 */
439	if (sc->WD_available && (sc->WD_hide_expose ==
440	    MPS_WD_EXPOSE_ALWAYS)) {
441		return;
442	}
443#endif //WD_SUPPORT
444
445	targ = mpssas_find_target_by_handle(sassc, 0, handle);
446	if (targ == NULL) {
447		/* FIXME: what is the action? */
448		/* We don't know about this device? */
449		mps_dprint(sc, MPS_ERROR,
450		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
451		return;
452	}
453
454	targ->flags |= MPSSAS_TARGET_INREMOVAL;
455
456	cm = mpssas_alloc_tm(sc);
457	if (cm == NULL) {
458		mps_dprint(sc, MPS_ERROR,
459		    "%s: command alloc failure\n", __func__);
460		return;
461	}
462
463	mpssas_rescan_target(sc, targ);
464
465	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
466	req->DevHandle = targ->handle;
467	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
468	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
469
470	/* SAS Hard Link Reset / SATA Link Reset */
471	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
472
473	cm->cm_targ = targ;
474	cm->cm_data = NULL;
475	cm->cm_desc.HighPriority.RequestFlags =
476	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
477	cm->cm_complete = mpssas_remove_volume;
478	cm->cm_complete_data = (void *)(uintptr_t)handle;
479
480	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
481	    __func__, targ->tid);
482	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
483
484	mps_map_command(sc, cm);
485}
486
487/*
488 * The MPT2 firmware performs debounce on the link to avoid transient link
489 * errors and false removals.  When it does decide that link has been lost
490 * and a device need to go away, it expects that the host will perform a
491 * target reset and then an op remove.  The reset has the side-effect of
492 * aborting any outstanding requests for the device, which is required for
493 * the op-remove to succeed.  It's not clear if the host should check for
494 * the device coming back alive after the reset.
495 */
496void
497mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
498{
499	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
500	struct mps_softc *sc;
501	struct mps_command *cm;
502	struct mpssas_target *targ = NULL;
503
504	MPS_FUNCTRACE(sassc->sc);
505
506	sc = sassc->sc;
507
508	targ = mpssas_find_target_by_handle(sassc, 0, handle);
509	if (targ == NULL) {
510		/* FIXME: what is the action? */
511		/* We don't know about this device? */
512		mps_dprint(sc, MPS_ERROR,
513		    "%s : invalid handle 0x%x \n", __func__, handle);
514		return;
515	}
516
517	targ->flags |= MPSSAS_TARGET_INREMOVAL;
518
519	cm = mpssas_alloc_tm(sc);
520	if (cm == NULL) {
521		mps_dprint(sc, MPS_ERROR,
522		    "%s: command alloc failure\n", __func__);
523		return;
524	}
525
526	mpssas_rescan_target(sc, targ);
527
528	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
529	memset(req, 0, sizeof(*req));
530	req->DevHandle = htole16(targ->handle);
531	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
532	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
533
534	/* SAS Hard Link Reset / SATA Link Reset */
535	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
536
537	cm->cm_targ = targ;
538	cm->cm_data = NULL;
539	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
540	cm->cm_complete = mpssas_remove_device;
541	cm->cm_complete_data = (void *)(uintptr_t)handle;
542
543	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
544	    __func__, targ->tid);
545	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
546
547	mps_map_command(sc, cm);
548}
549
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  Reuses the same command frame to send the
 * SAS_OP_REMOVE_DEVICE IO-unit control request, then completes any
 * commands still queued on the target with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/* Note: 'tm' is reused as the loop iterator from here on. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
622
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, clears the target slot and frees
 * its LUN list so the target id can be reused if the device returns.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release every LUN recorded for this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
690
691static int
692mpssas_register_events(struct mps_softc *sc)
693{
694	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695
696	bzero(events, 16);
697	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
698	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
699	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
700	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
701	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
702	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
703	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
704	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
705	setbit(events, MPI2_EVENT_IR_VOLUME);
706	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
707	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
708	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
709
710	mps_register_events(sc, events, mpssas_evt_handler, NULL,
711	    &sc->sassc->mpssas_eh);
712
713	return (0);
714}
715
716int
717mps_attach_sas(struct mps_softc *sc)
718{
719	struct mpssas_softc *sassc;
720	cam_status status;
721	int unit, error = 0;
722
723	MPS_FUNCTRACE(sc);
724	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
725
726	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
727	if(!sassc) {
728		mps_dprint(sc, MPS_INIT|MPS_ERROR,
729		    "Cannot allocate SAS controller memory\n");
730		return (ENOMEM);
731	}
732
733	/*
734	 * XXX MaxTargets could change during a reinit.  Since we don't
735	 * resize the targets[] array during such an event, cache the value
736	 * of MaxTargets here so that we don't get into trouble later.  This
737	 * should move into the reinit logic.
738	 */
739	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
740	sassc->targets = malloc(sizeof(struct mpssas_target) *
741	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
742	if(!sassc->targets) {
743		mps_dprint(sc, MPS_INIT|MPS_ERROR,
744		    "Cannot allocate SAS target memory\n");
745		free(sassc, M_MPT2);
746		return (ENOMEM);
747	}
748	sc->sassc = sassc;
749	sassc->sc = sc;
750
751	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
752		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
753		error = ENOMEM;
754		goto out;
755	}
756
757	unit = device_get_unit(sc->mps_dev);
758	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
759	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
760	if (sassc->sim == NULL) {
761		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
762		error = EINVAL;
763		goto out;
764	}
765
766	TAILQ_INIT(&sassc->ev_queue);
767
768	/* Initialize taskqueue for Event Handling */
769	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
770	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
771	    taskqueue_thread_enqueue, &sassc->ev_tq);
772	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
773	    device_get_nameunit(sc->mps_dev));
774
775	mps_lock(sc);
776
777	/*
778	 * XXX There should be a bus for every port on the adapter, but since
779	 * we're just going to fake the topology for now, we'll pretend that
780	 * everything is just a target on a single bus.
781	 */
782	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
783		mps_dprint(sc, MPS_INIT|MPS_ERROR,
784		    "Error %d registering SCSI bus\n", error);
785		mps_unlock(sc);
786		goto out;
787	}
788
789	/*
790	 * Assume that discovery events will start right away.
791	 *
792	 * Hold off boot until discovery is complete.
793	 */
794	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
795	sc->sassc->startup_refcount = 0;
796	mpssas_startup_increment(sassc);
797
798	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
799
800	/*
801	 * Register for async events so we can determine the EEDP
802	 * capabilities of devices.
803	 */
804	status = xpt_create_path(&sassc->path, /*periph*/NULL,
805	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
806	    CAM_LUN_WILDCARD);
807	if (status != CAM_REQ_CMP) {
808		mps_dprint(sc, MPS_ERROR|MPS_INIT,
809		    "Error %#x creating sim path\n", status);
810		sassc->path = NULL;
811	} else {
812		int event;
813
814#if (__FreeBSD_version >= 1000006) || \
815    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
816		event = AC_ADVINFO_CHANGED;
817#else
818		event = AC_FOUND_DEVICE;
819#endif
820		status = xpt_register_async(event, mpssas_async, sc,
821					    sassc->path);
822		if (status != CAM_REQ_CMP) {
823			mps_dprint(sc, MPS_ERROR,
824			    "Error %#x registering async handler for "
825			    "AC_ADVINFO_CHANGED events\n", status);
826			xpt_free_path(sassc->path);
827			sassc->path = NULL;
828		}
829	}
830	if (status != CAM_REQ_CMP) {
831		/*
832		 * EEDP use is the exception, not the rule.
833		 * Warn the user, but do not fail to attach.
834		 */
835		mps_printf(sc, "EEDP capabilities disabled.\n");
836	}
837
838	mps_unlock(sc);
839
840	mpssas_register_events(sc);
841out:
842	if (error)
843		mps_detach_sas(sc);
844
845	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
846	return (error);
847}
848
/*
 * Tear down the SAS transport layer: deregister events, drain the event
 * taskqueue (lock not held, to avoid deadlock with running tasks), undo
 * any outstanding startup freezes, deregister the CAM bus/SIM, and free
 * the target array and softc.  Safe to call on a partial attach; always
 * returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq freeze taken at the start of discovery. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free every LUN recorded on every target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
911
912void
913mpssas_discovery_end(struct mpssas_softc *sassc)
914{
915	struct mps_softc *sc = sassc->sc;
916
917	MPS_FUNCTRACE(sc);
918
919	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
920		callout_stop(&sassc->discovery_callout);
921
922	/*
923	 * After discovery has completed, check the mapping table for any
924	 * missing devices and update their missing counts. Only do this once
925	 * whenever the driver is initialized so that missing counts aren't
926	 * updated unnecessarily. Note that just because discovery has
927	 * completed doesn't mean that events have been processed yet. The
928	 * check_devices function is a callout timer that checks if ALL devices
929	 * are missing. If so, it will wait a little longer for events to
930	 * complete and keep resetting itself until some device in the mapping
931	 * table is not missing, meaning that event processing has started.
932	 */
933	if (sc->track_mapping_events) {
934		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
935		    "completed. Check for missing devices in the mapping "
936		    "table.\n");
937		callout_reset(&sc->device_check_callout,
938		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
939		    sc);
940	}
941}
942
/*
 * CAM action routine for the mps(4) SIM.  Dispatches an incoming CCB by
 * its function code.  Called by CAM with the per-controller mutex held
 * (asserted below).  Requests handled synchronously fall through to the
 * single xpt_done() at the bottom; XPT_RESET_DEV, XPT_SCSI_IO and
 * XPT_SMP_IO are handed off to helpers that complete the CCB
 * themselves, so those cases return early.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report negotiated transport settings for one target. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No device handle: nothing attached at this ID. */
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the target's negotiated link rate code to a speed. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate code: report speed as not valid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by mpssas_action_resetdev(). */
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; report success so CAM proceeds. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by the I/O path. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1085
1086static void
1087mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1088    target_id_t target_id, lun_id_t lun_id)
1089{
1090	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1091	struct cam_path *path;
1092
1093	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1094	    ac_code, target_id, (uintmax_t)lun_id);
1095
1096	if (xpt_create_path(&path, NULL,
1097		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1098		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1099			   "notification\n");
1100		return;
1101	}
1102
1103	xpt_async(ac_code, path, NULL);
1104	xpt_free_path(path);
1105}
1106
/*
 * Force completion of every in-flight command after a diagnostic reset:
 * the controller has been re-initialized, so no replies will ever
 * arrive for them.  Each command is completed with a NULL reply
 * (completion handlers must tolerate this); polled commands are marked
 * complete and sleeping waiters are woken.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;	/* set if anyone was notified about this command */

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): iteration starts at 1, presumably because SMID 0
	 * is reserved -- confirm against the command array setup. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1155
/*
 * Re-synchronize driver state with the controller after a diagnostic
 * reset: put the driver back into startup/discovery mode, announce a
 * bus reset to CAM, flush every outstanding command, and invalidate all
 * cached device handles so targets are rediscovered (handles may change
 * across a reset).
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Any nonzero count here means the flush above missed I/O. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1199
1200static void
1201mpssas_tm_timeout(void *data)
1202{
1203	struct mps_command *tm = data;
1204	struct mps_softc *sc = tm->cm_sc;
1205
1206	mtx_assert(&sc->mps_mtx, MA_OWNED);
1207
1208	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1209	    "task mgmt %p timed out\n", tm);
1210	mps_reinit(sc);
1211}
1212
/*
 * Completion handler for a LOGICAL UNIT RESET task management request.
 * If the LUN has no commands left outstanding, the reset worked:
 * announce it to CAM and either finish recovery or move on to the next
 * timed-out command for this target.  If commands remain, the reset
 * effectively failed and recovery escalates to a target reset.  A NULL
 * reply is either cleaned up (diag reset in progress) or escalated to a
 * controller re-init.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse this TM to abort the next timed-out command. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			mpssas_send_abort(sc, tm, cm);
		} else {
			/* Recovery for this target is fully done. */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1312
1313static void
1314mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1315{
1316	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1317	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1318	struct mpssas_target *targ;
1319
1320	callout_stop(&tm->cm_callout);
1321
1322	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1323	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1324	targ = tm->cm_targ;
1325
1326	/*
1327	 * Currently there should be no way we can hit this case.  It only
1328	 * happens when we have a failure to allocate chain frames, and
1329	 * task management commands don't have S/G lists.
1330	 */
1331	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1332		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1333			   "This should not happen!\n", __func__, tm->cm_flags);
1334		mpssas_free_tm(sc, tm);
1335		return;
1336	}
1337
1338	if (reply == NULL) {
1339		mps_dprint(sc, MPS_RECOVERY,
1340		    "NULL target reset reply for tm %pi TaskMID %u\n",
1341		    tm, le16toh(req->TaskMID));
1342		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1343			/* this completion was due to a reset, just cleanup */
1344			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1345			    "reset, ignoring NULL target reset reply\n");
1346			targ->tm = NULL;
1347			mpssas_free_tm(sc, tm);
1348		} else {
1349			/* we should have gotten a reply. */
1350			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1351			    "target reset attempt, resetting controller\n");
1352			mps_reinit(sc);
1353		}
1354		return;
1355	}
1356
1357	mps_dprint(sc, MPS_RECOVERY,
1358	    "target reset status 0x%x code 0x%x count %u\n",
1359	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1360	    le32toh(reply->TerminationCount));
1361
1362	if (targ->outstanding == 0) {
1363		/* we've finished recovery for this target and all
1364		 * of its logical units.
1365		 */
1366		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1367		    "Finished reset recovery for target %u\n", targ->tid);
1368
1369		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1370		    CAM_LUN_WILDCARD);
1371
1372		targ->tm = NULL;
1373		mpssas_free_tm(sc, tm);
1374	} else {
1375		/*
1376		 * After a target reset, if this target still has
1377		 * outstanding commands, the reset effectively failed,
1378		 * regardless of the status reported.  escalate.
1379		 */
1380		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1381		    "Target reset complete for target %u, but still have %u "
1382		    "command(s), resetting controller\n", targ->tid,
1383		    targ->outstanding);
1384		mps_reinit(sc);
1385	}
1386}
1387
#define MPS_RESET_TIMEOUT 30	/* TM reset timeout, in seconds */

/*
 * Build and send a SCSI task management reset request (logical unit
 * reset or target reset) using the supplied TM command.  The caller
 * must have set tm->cm_targ (and tm->cm_lun for a LUN reset).  The
 * completion handler installed here continues the recovery state
 * machine; if the TM itself times out, mpssas_tm_timeout() escalates
 * to a controller re-init.  Returns -1 for a missing device handle or
 * an unknown reset type, otherwise the mps_map_command() result.
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1449
1450
/*
 * Completion handler for an ABORT TASK task management request.  On a
 * normal reply: if the target's timed-out list is now empty, recovery
 * is finished; if the head of the list is a different command than the
 * one we just aborted, the abort worked and we continue with the next
 * one; if the same command is still at the head, the abort failed and
 * recovery escalates to a logical unit reset.  A NULL reply is either
 * cleaned up (diag reset in progress) or escalated to a controller
 * re-init.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1532
#define MPS_ABORT_TIMEOUT 5	/* abort TM timeout, in seconds */

/*
 * Build and send an ABORT TASK task management request for the
 * timed-out command 'cm', using TM command 'tm'.  Completion is handled
 * by mpssas_abort_complete(), which either continues with the next
 * timed-out command or escalates to a LUN reset.  Returns -1 if the
 * target has no device handle, otherwise the mps_map_command() result.
 */
static int
mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;
	int err;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* Identify the command to abort by its SMID. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	targ->aborts++;

	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1584
1585static void
1586mpssas_scsiio_timeout(void *data)
1587{
1588	sbintime_t elapsed, now;
1589	union ccb *ccb;
1590	struct mps_softc *sc;
1591	struct mps_command *cm;
1592	struct mpssas_target *targ;
1593
1594	cm = (struct mps_command *)data;
1595	sc = cm->cm_sc;
1596	ccb = cm->cm_ccb;
1597	now = sbinuptime();
1598
1599	MPS_FUNCTRACE(sc);
1600	mtx_assert(&sc->mps_mtx, MA_OWNED);
1601
1602	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1603
1604	/*
1605	 * Run the interrupt handler to make sure it's not pending.  This
1606	 * isn't perfect because the command could have already completed
1607	 * and been re-used, though this is unlikely.
1608	 */
1609	mps_intr_locked(sc);
1610	if (cm->cm_state == MPS_CM_STATE_FREE) {
1611		mpssas_log_command(cm, MPS_XINFO,
1612		    "SCSI command %p almost timed out\n", cm);
1613		return;
1614	}
1615
1616	if (cm->cm_ccb == NULL) {
1617		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1618		return;
1619	}
1620
1621	targ = cm->cm_targ;
1622	targ->timeouts++;
1623
1624	elapsed = now - ccb->ccb_h.qos.sim_data;
1625	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1626	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1627	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1628	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1629
1630	/* XXX first, check the firmware state, to see if it's still
1631	 * operational.  if not, do a diag reset.
1632	 */
1633	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1634	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1635	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1636
1637	if (targ->tm != NULL) {
1638		/* target already in recovery, just queue up another
1639		 * timedout command to be processed later.
1640		 */
1641		mps_dprint(sc, MPS_RECOVERY,
1642		    "queued timedout cm %p for processing by tm %p\n",
1643		    cm, targ->tm);
1644	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1645		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1646		    "Sending abort to target %u for SMID %d\n", targ->tid,
1647		    cm->cm_desc.Default.SMID);
1648		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1649		    cm, targ->tm);
1650
1651		/* start recovery by aborting the first timedout command */
1652		mpssas_send_abort(sc, targ->tm, cm);
1653	} else {
1654		/* XXX queue this target up for recovery once a TM becomes
1655		 * available.  The firmware only has a limited number of
1656		 * HighPriority credits for the high priority requests used
1657		 * for task management, and we ran out.
1658		 *
1659		 * Isilon: don't worry about this for now, since we have
1660		 * more credits than disks in an enclosure, and limit
1661		 * ourselves to one TM per target for recovery.
1662		 */
1663		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1664		    "timedout cm %p failed to allocate a tm\n", cm);
1665	}
1666
1667}
1668
/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI IO request and queue
 * it to the controller.  The first half is a series of guard cases that
 * complete the CCB immediately (missing target, RAID component,
 * already-aborted request, target in removal, driver shutdown, target
 * in reset, or no free command slots); the second half fills in the
 * request frame, handles EEDP protection-information setup, optionally
 * builds a WD direct-drive I/O, arms the per-command timeout, and maps
 * the command for DMA.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		/* No device handle: nothing attached at this target ID. */
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		/* I/O must go through the volume, not the raw component. */
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		/* Out of command slots (or a diag reset is pending):
		 * freeze the simq and ask CAM to requeue this I/O. */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy in the CDB, from either the pointer or inline storage. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 * NOTE(review): the branch below calls mpssas_direct_drive_io() when
	 * the sim_priv field equals MPS_WD_RETRY, which reads as the opposite
	 * of this comment -- confirm against mpssas_direct_drive_io()'s
	 * handling of that field before changing anything.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submission time and arm the per-command timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1942
1943/**
1944 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1945 */
1946static void
1947mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1948    Mpi2SCSIIOReply_t *mpi_reply)
1949{
1950	u32 response_info;
1951	u8 *response_bytes;
1952	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1953	    MPI2_IOCSTATUS_MASK;
1954	u8 scsi_state = mpi_reply->SCSIState;
1955	u8 scsi_status = mpi_reply->SCSIStatus;
1956	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1957	const char *desc_ioc_state, *desc_scsi_status;
1958
1959	if (log_info == 0x31170000)
1960		return;
1961
1962	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1963	    ioc_status);
1964	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1965	    scsi_status);
1966
1967	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1968	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1969
1970	/*
1971	 *We can add more detail about underflow data here
1972	 * TO-DO
1973	 */
1974	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1975	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1976	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1977	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1978
1979	if (sc->mps_debug & MPS_XINFO &&
1980		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1981		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1982		scsi_sense_print(csio);
1983		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1984	}
1985
1986	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1987		response_info = le32toh(mpi_reply->ResponseInfo);
1988		response_bytes = (u8 *)&response_info;
1989		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1990		    response_bytes[0],
1991		    mps_describe_table(mps_scsi_taskmgmt_string,
1992		    response_bytes[0]));
1993	}
1994}
1995
1996static void
1997mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1998{
1999	MPI2_SCSI_IO_REPLY *rep;
2000	union ccb *ccb;
2001	struct ccb_scsiio *csio;
2002	struct mpssas_softc *sassc;
2003	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2004	u8 *TLR_bits, TLR_on;
2005	int dir = 0, i;
2006	u16 alloc_len;
2007	struct mpssas_target *target;
2008	target_id_t target_id;
2009
2010	MPS_FUNCTRACE(sc);
2011	mps_dprint(sc, MPS_TRACE,
2012	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2013	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2014	    cm->cm_targ->outstanding);
2015
2016	callout_stop(&cm->cm_callout);
2017	mtx_assert(&sc->mps_mtx, MA_OWNED);
2018
2019	sassc = sc->sassc;
2020	ccb = cm->cm_complete_data;
2021	csio = &ccb->csio;
2022	target_id = csio->ccb_h.target_id;
2023	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2024	/*
2025	 * XXX KDM if the chain allocation fails, does it matter if we do
2026	 * the sync and unload here?  It is simpler to do it in every case,
2027	 * assuming it doesn't cause problems.
2028	 */
2029	if (cm->cm_data != NULL) {
2030		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2031			dir = BUS_DMASYNC_POSTREAD;
2032		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2033			dir = BUS_DMASYNC_POSTWRITE;
2034		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2035		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2036	}
2037
2038	cm->cm_targ->completed++;
2039	cm->cm_targ->outstanding--;
2040	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2041	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2042
2043#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2044	if (ccb->csio.bio != NULL)
2045		biotrack(ccb->csio.bio, __func__);
2046#endif
2047
2048	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2049		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2050		if (cm->cm_reply != NULL)
2051			mpssas_log_command(cm, MPS_RECOVERY,
2052			    "completed timedout cm %p ccb %p during recovery "
2053			    "ioc %x scsi %x state %x xfer %u\n",
2054			    cm, cm->cm_ccb,
2055			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2056			    le32toh(rep->TransferCount));
2057		else
2058			mpssas_log_command(cm, MPS_RECOVERY,
2059			    "completed timedout cm %p ccb %p during recovery\n",
2060			    cm, cm->cm_ccb);
2061	} else if (cm->cm_targ->tm != NULL) {
2062		if (cm->cm_reply != NULL)
2063			mpssas_log_command(cm, MPS_RECOVERY,
2064			    "completed cm %p ccb %p during recovery "
2065			    "ioc %x scsi %x state %x xfer %u\n",
2066			    cm, cm->cm_ccb,
2067			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2068			    le32toh(rep->TransferCount));
2069		else
2070			mpssas_log_command(cm, MPS_RECOVERY,
2071			    "completed cm %p ccb %p during recovery\n",
2072			    cm, cm->cm_ccb);
2073	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2074		mpssas_log_command(cm, MPS_RECOVERY,
2075		    "reset completed cm %p ccb %p\n",
2076		    cm, cm->cm_ccb);
2077	}
2078
2079	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2080		/*
2081		 * We ran into an error after we tried to map the command,
2082		 * so we're getting a callback without queueing the command
2083		 * to the hardware.  So we set the status here, and it will
2084		 * be retained below.  We'll go through the "fast path",
2085		 * because there can be no reply when we haven't actually
2086		 * gone out to the hardware.
2087		 */
2088		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2089
2090		/*
2091		 * Currently the only error included in the mask is
2092		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2093		 * chain frames.  We need to freeze the queue until we get
2094		 * a command that completed without this error, which will
2095		 * hopefully have some chain frames attached that we can
2096		 * use.  If we wanted to get smarter about it, we would
2097		 * only unfreeze the queue in this condition when we're
2098		 * sure that we're getting some chain frames back.  That's
2099		 * probably unnecessary.
2100		 */
2101		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2102			xpt_freeze_simq(sassc->sim, 1);
2103			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2104			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2105				   "freezing SIM queue\n");
2106		}
2107	}
2108
2109	/*
2110	 * If this is a Start Stop Unit command and it was issued by the driver
2111	 * during shutdown, decrement the refcount to account for all of the
2112	 * commands that were sent.  All SSU commands should be completed before
2113	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2114	 * is TRUE.
2115	 */
2116	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2117		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2118		sc->SSU_refcount--;
2119	}
2120
2121	/* Take the fast path to completion */
2122	if (cm->cm_reply == NULL) {
2123		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2124			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2125				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2126			else {
2127				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2128				ccb->csio.scsi_status = SCSI_STATUS_OK;
2129			}
2130			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2131				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2132				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2133				mps_dprint(sc, MPS_XINFO,
2134				    "Unfreezing SIM queue\n");
2135			}
2136		}
2137
2138		/*
2139		 * There are two scenarios where the status won't be
2140		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2141		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2142		 */
2143		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2144			/*
2145			 * Freeze the dev queue so that commands are
2146			 * executed in the correct order after error
2147			 * recovery.
2148			 */
2149			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2150			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2151		}
2152		mps_free_command(sc, cm);
2153		xpt_done(ccb);
2154		return;
2155	}
2156
2157	mpssas_log_command(cm, MPS_XINFO,
2158	    "ioc %x scsi %x state %x xfer %u\n",
2159	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2160	    le32toh(rep->TransferCount));
2161
2162	/*
2163	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2164	 * Volume if an error occurred (normal I/O retry).  Use the original
2165	 * CCB, but set a flag that this will be a retry so that it's sent to
2166	 * the original volume.  Free the command but reuse the CCB.
2167	 */
2168	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2169		mps_free_command(sc, cm);
2170		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2171		mpssas_action_scsiio(sassc, ccb);
2172		return;
2173	} else
2174		ccb->ccb_h.sim_priv.entries[0].field = 0;
2175
2176	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2177	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2178		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2179		/* FALLTHROUGH */
2180	case MPI2_IOCSTATUS_SUCCESS:
2181	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2182
2183		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2184		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2185			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2186
2187		/* Completion failed at the transport level. */
2188		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2189		    MPI2_SCSI_STATE_TERMINATED)) {
2190			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2191			break;
2192		}
2193
2194		/* In a modern packetized environment, an autosense failure
2195		 * implies that there's not much else that can be done to
2196		 * recover the command.
2197		 */
2198		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2199			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2200			break;
2201		}
2202
2203		/*
2204		 * CAM doesn't care about SAS Response Info data, but if this is
2205		 * the state check if TLR should be done.  If not, clear the
2206		 * TLR_bits for the target.
2207		 */
2208		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2209		    ((le32toh(rep->ResponseInfo) &
2210		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2211		    MPS_SCSI_RI_INVALID_FRAME)) {
2212			sc->mapping_table[target_id].TLR_bits =
2213			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2214		}
2215
2216		/*
2217		 * Intentionally override the normal SCSI status reporting
2218		 * for these two cases.  These are likely to happen in a
2219		 * multi-initiator environment, and we want to make sure that
2220		 * CAM retries these commands rather than fail them.
2221		 */
2222		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2223		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2224			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2225			break;
2226		}
2227
2228		/* Handle normal status and sense */
2229		csio->scsi_status = rep->SCSIStatus;
2230		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2231			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2232		else
2233			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2234
2235		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2236			int sense_len, returned_sense_len;
2237
2238			returned_sense_len = min(le32toh(rep->SenseCount),
2239			    sizeof(struct scsi_sense_data));
2240			if (returned_sense_len < ccb->csio.sense_len)
2241				ccb->csio.sense_resid = ccb->csio.sense_len -
2242					returned_sense_len;
2243			else
2244				ccb->csio.sense_resid = 0;
2245
2246			sense_len = min(returned_sense_len,
2247			    ccb->csio.sense_len - ccb->csio.sense_resid);
2248			bzero(&ccb->csio.sense_data,
2249			      sizeof(ccb->csio.sense_data));
2250			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2251			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2252		}
2253
2254		/*
2255		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2256		 * and it's page code 0 (Supported Page List), and there is
2257		 * inquiry data, and this is for a sequential access device, and
2258		 * the device is an SSP target, and TLR is supported by the
2259		 * controller, turn the TLR_bits value ON if page 0x90 is
2260		 * supported.
2261		 */
2262		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2263		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2264		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2265		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2266		    (csio->data_ptr != NULL) &&
2267		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2268		    (sc->control_TLR) &&
2269		    (sc->mapping_table[target_id].device_info &
2270		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2271			vpd_list = (struct scsi_vpd_supported_page_list *)
2272			    csio->data_ptr;
2273			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2274			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2275			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2276			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2277			    csio->cdb_io.cdb_bytes[4];
2278			alloc_len -= csio->resid;
2279			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2280				if (vpd_list->list[i] == 0x90) {
2281					*TLR_bits = TLR_on;
2282					break;
2283				}
2284			}
2285		}
2286
2287		/*
2288		 * If this is a SATA direct-access end device, mark it so that
2289		 * a SCSI StartStopUnit command will be sent to it when the
2290		 * driver is being shutdown.
2291		 */
2292		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2293		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2294		    (sc->mapping_table[target_id].device_info &
2295		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2296		    ((sc->mapping_table[target_id].device_info &
2297		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2298		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2299			target = &sassc->targets[target_id];
2300			target->supports_SSU = TRUE;
2301			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2302			    target_id);
2303		}
2304		break;
2305	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2306	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2307		/*
2308		 * If devinfo is 0 this will be a volume.  In that case don't
2309		 * tell CAM that the volume is not there.  We want volumes to
2310		 * be enumerated until they are deleted/removed, not just
2311		 * failed.
2312		 */
2313		if (cm->cm_targ->devinfo == 0)
2314			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2315		else
2316			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2317		break;
2318	case MPI2_IOCSTATUS_INVALID_SGL:
2319		mps_print_scsiio_cmd(sc, cm);
2320		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2321		break;
2322	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2323		/*
2324		 * This is one of the responses that comes back when an I/O
2325		 * has been aborted.  If it is because of a timeout that we
2326		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2327		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2328		 * command is the same (it gets retried, subject to the
2329		 * retry counter), the only difference is what gets printed
2330		 * on the console.
2331		 */
2332		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2333			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2334		else
2335			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2336		break;
2337	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2338		/* resid is ignored for this condition */
2339		csio->resid = 0;
2340		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2341		break;
2342	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2343	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2344		/*
2345		 * These can sometimes be transient transport-related
2346		 * errors, and sometimes persistent drive-related errors.
2347		 * We used to retry these without decrementing the retry
2348		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2349		 * we hit a persistent drive problem that returns one of
2350		 * these error codes, we would retry indefinitely.  So,
2351		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2352		 * count and avoid infinite retries.  We're taking the
2353		 * potential risk of flagging false failures in the event
2354		 * of a topology-related error (e.g. a SAS expander problem
2355		 * causes a command addressed to a drive to fail), but
2356		 * avoiding getting into an infinite retry loop.
2357		 */
2358		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2359		mps_dprint(sc, MPS_INFO,
2360		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2361		    mps_describe_table(mps_iocstatus_string,
2362		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2363		    target_id, cm->cm_desc.Default.SMID,
2364		    le32toh(rep->IOCLogInfo));
2365		mps_dprint(sc, MPS_XINFO,
2366		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2367		    rep->SCSIStatus, rep->SCSIState,
2368		    le32toh(rep->TransferCount));
2369		break;
2370	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2371	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2372	case MPI2_IOCSTATUS_INVALID_VPID:
2373	case MPI2_IOCSTATUS_INVALID_FIELD:
2374	case MPI2_IOCSTATUS_INVALID_STATE:
2375	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2376	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2377	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2378	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2379	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2380	default:
2381		mpssas_log_command(cm, MPS_XINFO,
2382		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2383		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2384		    rep->SCSIStatus, rep->SCSIState,
2385		    le32toh(rep->TransferCount));
2386		csio->resid = cm->cm_length;
2387		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2388		break;
2389	}
2390
2391	mps_sc_failed_io_info(sc,csio,rep);
2392
2393	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2394		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2395		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2396		mps_dprint(sc, MPS_XINFO, "Command completed, "
2397		    "unfreezing SIM queue\n");
2398	}
2399
2400	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2401		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2402		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2403	}
2404
2405	mps_free_command(sc, cm);
2406	xpt_done(ccb);
2407}
2408
/* All requests that reach here are endian safe */
2410static void
2411mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2412    union ccb *ccb) {
2413	pMpi2SCSIIORequest_t	pIO_req;
2414	struct mps_softc	*sc = sassc->sc;
2415	uint64_t		virtLBA;
2416	uint32_t		physLBA, stripe_offset, stripe_unit;
2417	uint32_t		io_size, column;
2418	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2419
2420	/*
2421	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2422	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2423	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2424	 * bit different than the 10/16 CDBs, handle them separately.
2425	 */
2426	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2427	CDB = pIO_req->CDB.CDB32;
2428
2429	/*
2430	 * Handle 6 byte CDBs.
2431	 */
2432	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2433	    (CDB[0] == WRITE_6))) {
2434		/*
2435		 * Get the transfer size in blocks.
2436		 */
2437		io_size = (cm->cm_length >> sc->DD_block_exponent);
2438
2439		/*
2440		 * Get virtual LBA given in the CDB.
2441		 */
2442		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2443		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2444
2445		/*
2446		 * Check that LBA range for I/O does not exceed volume's
2447		 * MaxLBA.
2448		 */
2449		if ((virtLBA + (uint64_t)io_size - 1) <=
2450		    sc->DD_max_lba) {
2451			/*
2452			 * Check if the I/O crosses a stripe boundary.  If not,
2453			 * translate the virtual LBA to a physical LBA and set
2454			 * the DevHandle for the PhysDisk to be used.  If it
2455			 * does cross a boundary, do normal I/O.  To get the
2456			 * right DevHandle to use, get the map number for the
2457			 * column, then use that map number to look up the
2458			 * DevHandle of the PhysDisk.
2459			 */
2460			stripe_offset = (uint32_t)virtLBA &
2461			    (sc->DD_stripe_size - 1);
2462			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2463				physLBA = (uint32_t)virtLBA >>
2464				    sc->DD_stripe_exponent;
2465				stripe_unit = physLBA / sc->DD_num_phys_disks;
2466				column = physLBA % sc->DD_num_phys_disks;
2467				pIO_req->DevHandle =
2468				    htole16(sc->DD_column_map[column].dev_handle);
2469				/* ???? Is this endian safe*/
2470				cm->cm_desc.SCSIIO.DevHandle =
2471				    pIO_req->DevHandle;
2472
2473				physLBA = (stripe_unit <<
2474				    sc->DD_stripe_exponent) + stripe_offset;
2475				ptrLBA = &pIO_req->CDB.CDB32[1];
2476				physLBA_byte = (uint8_t)(physLBA >> 16);
2477				*ptrLBA = physLBA_byte;
2478				ptrLBA = &pIO_req->CDB.CDB32[2];
2479				physLBA_byte = (uint8_t)(physLBA >> 8);
2480				*ptrLBA = physLBA_byte;
2481				ptrLBA = &pIO_req->CDB.CDB32[3];
2482				physLBA_byte = (uint8_t)physLBA;
2483				*ptrLBA = physLBA_byte;
2484
2485				/*
2486				 * Set flag that Direct Drive I/O is
2487				 * being done.
2488				 */
2489				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2490			}
2491		}
2492		return;
2493	}
2494
2495	/*
2496	 * Handle 10, 12 or 16 byte CDBs.
2497	 */
2498	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2499	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2500	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2501	    (CDB[0] == WRITE_12))) {
2502		/*
2503		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2504		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2505		 * the else section.  10-byte and 12-byte CDB's are OK.
2506		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2507		 * ready to accept 12byte CDB for Direct IOs.
2508		 */
2509		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2510		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2511		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2512			/*
2513			 * Get the transfer size in blocks.
2514			 */
2515			io_size = (cm->cm_length >> sc->DD_block_exponent);
2516
2517			/*
2518			 * Get virtual LBA.  Point to correct lower 4 bytes of
2519			 * LBA in the CDB depending on command.
2520			 */
2521			lba_idx = ((CDB[0] == READ_12) ||
2522				(CDB[0] == WRITE_12) ||
2523				(CDB[0] == READ_10) ||
2524				(CDB[0] == WRITE_10))? 2 : 6;
2525			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2526			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2527			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2528			    (uint64_t)CDB[lba_idx + 3];
2529
2530			/*
2531			 * Check that LBA range for I/O does not exceed volume's
2532			 * MaxLBA.
2533			 */
2534			if ((virtLBA + (uint64_t)io_size - 1) <=
2535			    sc->DD_max_lba) {
2536				/*
2537				 * Check if the I/O crosses a stripe boundary.
2538				 * If not, translate the virtual LBA to a
2539				 * physical LBA and set the DevHandle for the
2540				 * PhysDisk to be used.  If it does cross a
2541				 * boundary, do normal I/O.  To get the right
2542				 * DevHandle to use, get the map number for the
2543				 * column, then use that map number to look up
2544				 * the DevHandle of the PhysDisk.
2545				 */
2546				stripe_offset = (uint32_t)virtLBA &
2547				    (sc->DD_stripe_size - 1);
2548				if ((stripe_offset + io_size) <=
2549				    sc->DD_stripe_size) {
2550					physLBA = (uint32_t)virtLBA >>
2551					    sc->DD_stripe_exponent;
2552					stripe_unit = physLBA /
2553					    sc->DD_num_phys_disks;
2554					column = physLBA %
2555					    sc->DD_num_phys_disks;
2556					pIO_req->DevHandle =
2557					    htole16(sc->DD_column_map[column].
2558					    dev_handle);
2559					cm->cm_desc.SCSIIO.DevHandle =
2560					    pIO_req->DevHandle;
2561
2562					physLBA = (stripe_unit <<
2563					    sc->DD_stripe_exponent) +
2564					    stripe_offset;
2565					ptrLBA =
2566					    &pIO_req->CDB.CDB32[lba_idx];
2567					physLBA_byte = (uint8_t)(physLBA >> 24);
2568					*ptrLBA = physLBA_byte;
2569					ptrLBA =
2570					    &pIO_req->CDB.CDB32[lba_idx + 1];
2571					physLBA_byte = (uint8_t)(physLBA >> 16);
2572					*ptrLBA = physLBA_byte;
2573					ptrLBA =
2574					    &pIO_req->CDB.CDB32[lba_idx + 2];
2575					physLBA_byte = (uint8_t)(physLBA >> 8);
2576					*ptrLBA = physLBA_byte;
2577					ptrLBA =
2578					    &pIO_req->CDB.CDB32[lba_idx + 3];
2579					physLBA_byte = (uint8_t)physLBA;
2580					*ptrLBA = physLBA_byte;
2581
2582					/*
2583					 * Set flag that Direct Drive I/O is
2584					 * being done.
2585					 */
2586					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2587				}
2588			}
2589		} else {
2590			/*
2591			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2592			 * 0.  Get the transfer size in blocks.
2593			 */
2594			io_size = (cm->cm_length >> sc->DD_block_exponent);
2595
2596			/*
2597			 * Get virtual LBA.
2598			 */
2599			virtLBA = ((uint64_t)CDB[2] << 54) |
2600			    ((uint64_t)CDB[3] << 48) |
2601			    ((uint64_t)CDB[4] << 40) |
2602			    ((uint64_t)CDB[5] << 32) |
2603			    ((uint64_t)CDB[6] << 24) |
2604			    ((uint64_t)CDB[7] << 16) |
2605			    ((uint64_t)CDB[8] << 8) |
2606			    (uint64_t)CDB[9];
2607
2608			/*
2609			 * Check that LBA range for I/O does not exceed volume's
2610			 * MaxLBA.
2611			 */
2612			if ((virtLBA + (uint64_t)io_size - 1) <=
2613			    sc->DD_max_lba) {
2614				/*
2615				 * Check if the I/O crosses a stripe boundary.
2616				 * If not, translate the virtual LBA to a
2617				 * physical LBA and set the DevHandle for the
2618				 * PhysDisk to be used.  If it does cross a
2619				 * boundary, do normal I/O.  To get the right
2620				 * DevHandle to use, get the map number for the
2621				 * column, then use that map number to look up
2622				 * the DevHandle of the PhysDisk.
2623				 */
2624				stripe_offset = (uint32_t)virtLBA &
2625				    (sc->DD_stripe_size - 1);
2626				if ((stripe_offset + io_size) <=
2627				    sc->DD_stripe_size) {
2628					physLBA = (uint32_t)(virtLBA >>
2629					    sc->DD_stripe_exponent);
2630					stripe_unit = physLBA /
2631					    sc->DD_num_phys_disks;
2632					column = physLBA %
2633					    sc->DD_num_phys_disks;
2634					pIO_req->DevHandle =
2635					    htole16(sc->DD_column_map[column].
2636					    dev_handle);
2637					cm->cm_desc.SCSIIO.DevHandle =
2638					    pIO_req->DevHandle;
2639
2640					physLBA = (stripe_unit <<
2641					    sc->DD_stripe_exponent) +
2642					    stripe_offset;
2643
2644					/*
2645					 * Set upper 4 bytes of LBA to 0.  We
2646					 * assume that the phys disks are less
2647					 * than 2 TB's in size.  Then, set the
2648					 * lower 4 bytes.
2649					 */
2650					pIO_req->CDB.CDB32[2] = 0;
2651					pIO_req->CDB.CDB32[3] = 0;
2652					pIO_req->CDB.CDB32[4] = 0;
2653					pIO_req->CDB.CDB32[5] = 0;
2654					ptrLBA = &pIO_req->CDB.CDB32[6];
2655					physLBA_byte = (uint8_t)(physLBA >> 24);
2656					*ptrLBA = physLBA_byte;
2657					ptrLBA = &pIO_req->CDB.CDB32[7];
2658					physLBA_byte = (uint8_t)(physLBA >> 16);
2659					*ptrLBA = physLBA_byte;
2660					ptrLBA = &pIO_req->CDB.CDB32[8];
2661					physLBA_byte = (uint8_t)(physLBA >> 8);
2662					*ptrLBA = physLBA_byte;
2663					ptrLBA = &pIO_req->CDB.CDB32[9];
2664					physLBA_byte = (uint8_t)physLBA;
2665					*ptrLBA = physLBA_byte;
2666
2667					/*
2668					 * Set flag that Direct Drive I/O is
2669					 * being done.
2670					 */
2671					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2672				}
2673			}
2674		}
2675	}
2676}
2677
2678#if __FreeBSD_version >= 900026
2679static void
2680mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2681{
2682	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2683	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2684	uint64_t sasaddr;
2685	union ccb *ccb;
2686
2687	ccb = cm->cm_complete_data;
2688
2689	/*
2690	 * Currently there should be no way we can hit this case.  It only
2691	 * happens when we have a failure to allocate chain frames, and SMP
2692	 * commands require two S/G elements only.  That should be handled
2693	 * in the standard request size.
2694	 */
2695	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2696		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2697			   __func__, cm->cm_flags);
2698		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2699		goto bailout;
2700        }
2701
2702	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2703	if (rpl == NULL) {
2704		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2705		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2706		goto bailout;
2707	}
2708
2709	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2710	sasaddr = le32toh(req->SASAddress.Low);
2711	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2712
2713	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2714	    MPI2_IOCSTATUS_SUCCESS ||
2715	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2716		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2717		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2718		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2719		goto bailout;
2720	}
2721
2722	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2723		   "%#jx completed successfully\n", __func__,
2724		   (uintmax_t)sasaddr);
2725
2726	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2727		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2728	else
2729		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2730
2731bailout:
2732	/*
2733	 * We sync in both directions because we had DMAs in the S/G list
2734	 * in both directions.
2735	 */
2736	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2737			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2738	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2739	mps_free_command(sc, cm);
2740	xpt_done(ccb);
2741}
2742
2743static void
2744mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2745{
2746	struct mps_command *cm;
2747	uint8_t *request, *response;
2748	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2749	struct mps_softc *sc;
2750	int error;
2751
2752	sc = sassc->sc;
2753	error = 0;
2754
2755	/*
2756	 * XXX We don't yet support physical addresses here.
2757	 */
2758	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2759	case CAM_DATA_PADDR:
2760	case CAM_DATA_SG_PADDR:
2761		mps_dprint(sc, MPS_ERROR,
2762			   "%s: physical addresses not supported\n", __func__);
2763		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2764		xpt_done(ccb);
2765		return;
2766	case CAM_DATA_SG:
2767		/*
2768		 * The chip does not support more than one buffer for the
2769		 * request or response.
2770		 */
2771	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2772		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2773			mps_dprint(sc, MPS_ERROR,
2774				   "%s: multiple request or response "
2775				   "buffer segments not supported for SMP\n",
2776				   __func__);
2777			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2778			xpt_done(ccb);
2779			return;
2780		}
2781
2782		/*
2783		 * The CAM_SCATTER_VALID flag was originally implemented
2784		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2785		 * We have two.  So, just take that flag to mean that we
2786		 * might have S/G lists, and look at the S/G segment count
2787		 * to figure out whether that is the case for each individual
2788		 * buffer.
2789		 */
2790		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2791			bus_dma_segment_t *req_sg;
2792
2793			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2794			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2795		} else
2796			request = ccb->smpio.smp_request;
2797
2798		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2799			bus_dma_segment_t *rsp_sg;
2800
2801			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2802			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2803		} else
2804			response = ccb->smpio.smp_response;
2805		break;
2806	case CAM_DATA_VADDR:
2807		request = ccb->smpio.smp_request;
2808		response = ccb->smpio.smp_response;
2809		break;
2810	default:
2811		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2812		xpt_done(ccb);
2813		return;
2814	}
2815
2816	cm = mps_alloc_command(sc);
2817	if (cm == NULL) {
2818		mps_dprint(sc, MPS_ERROR,
2819		    "%s: cannot allocate command\n", __func__);
2820		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2821		xpt_done(ccb);
2822		return;
2823	}
2824
2825	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2826	bzero(req, sizeof(*req));
2827	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2828
2829	/* Allow the chip to use any route to this SAS address. */
2830	req->PhysicalPort = 0xff;
2831
2832	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2833	req->SGLFlags =
2834	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2835
2836	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2837	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2838
2839	mpi_init_sge(cm, req, &req->SGL);
2840
2841	/*
2842	 * Set up a uio to pass into mps_map_command().  This allows us to
2843	 * do one map command, and one busdma call in there.
2844	 */
2845	cm->cm_uio.uio_iov = cm->cm_iovec;
2846	cm->cm_uio.uio_iovcnt = 2;
2847	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2848
2849	/*
2850	 * The read/write flag isn't used by busdma, but set it just in
2851	 * case.  This isn't exactly accurate, either, since we're going in
2852	 * both directions.
2853	 */
2854	cm->cm_uio.uio_rw = UIO_WRITE;
2855
2856	cm->cm_iovec[0].iov_base = request;
2857	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2858	cm->cm_iovec[1].iov_base = response;
2859	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2860
2861	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2862			       cm->cm_iovec[1].iov_len;
2863
2864	/*
2865	 * Trigger a warning message in mps_data_cb() for the user if we
2866	 * wind up exceeding two S/G segments.  The chip expects one
2867	 * segment for the request and another for the response.
2868	 */
2869	cm->cm_max_segs = 2;
2870
2871	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2872	cm->cm_complete = mpssas_smpio_complete;
2873	cm->cm_complete_data = ccb;
2874
2875	/*
2876	 * Tell the mapping code that we're using a uio, and that this is
2877	 * an SMP passthrough request.  There is a little special-case
2878	 * logic there (in mps_data_cb()) to handle the bidirectional
2879	 * transfer.
2880	 */
2881	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2882			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2883
2884	/* The chip data format is little endian. */
2885	req->SASAddress.High = htole32(sasaddr >> 32);
2886	req->SASAddress.Low = htole32(sasaddr);
2887
2888	/*
2889	 * XXX Note that we don't have a timeout/abort mechanism here.
2890	 * From the manual, it looks like task management requests only
2891	 * work for SCSI IO and SATA passthrough requests.  We may need to
2892	 * have a mechanism to retry requests in the event of a chip reset
2893	 * at least.  Hopefully the chip will insure that any errors short
2894	 * of that are relayed back to the driver.
2895	 */
2896	error = mps_map_command(sc, cm);
2897	if ((error != 0) && (error != EINPROGRESS)) {
2898		mps_dprint(sc, MPS_ERROR,
2899			   "%s: error %d returned from mps_map_command()\n",
2900			   __func__, error);
2901		goto bailout_error;
2902	}
2903
2904	return;
2905
2906bailout_error:
2907	mps_free_command(sc, cm);
2908	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2909	xpt_done(ccb);
2910	return;
2911
2912}
2913
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address the SMP request
 * should be routed to, then hand the CCB to mpssas_send_smpcmd().
 *
 * The address is either that of the device itself (if it contains an
 * embedded SMP target) or that of its parent, which is normally the
 * expander the device is attached to.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* Handle 0 means the firmware never reported this target. */
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we need to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe style: look the parent up in the target table. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * New probe style: the parent's devinfo and SAS address are
		 * cached directly on the target, so no table lookup is
		 * needed.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3049#endif //__FreeBSD_version >= 900026
3050
3051static void
3052mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3053{
3054	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3055	struct mps_softc *sc;
3056	struct mps_command *tm;
3057	struct mpssas_target *targ;
3058
3059	MPS_FUNCTRACE(sassc->sc);
3060	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3061
3062	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3063	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3064	     ccb->ccb_h.target_id));
3065	sc = sassc->sc;
3066	tm = mps_alloc_command(sc);
3067	if (tm == NULL) {
3068		mps_dprint(sc, MPS_ERROR,
3069		    "command alloc failure in mpssas_action_resetdev\n");
3070		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3071		xpt_done(ccb);
3072		return;
3073	}
3074
3075	targ = &sassc->targets[ccb->ccb_h.target_id];
3076	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3077	req->DevHandle = htole16(targ->handle);
3078	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3079	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3080
3081	/* SAS Hard Link Reset / SATA Link Reset */
3082	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3083
3084	tm->cm_data = NULL;
3085	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3086	tm->cm_complete = mpssas_resetdev_complete;
3087	tm->cm_complete_data = ccb;
3088	tm->cm_targ = targ;
3089	targ->flags |= MPSSAS_TARGET_INRESET;
3090
3091	mps_map_command(sc, tm);
3092}
3093
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_action_resetdev().  Translates the TM reply into a CAM status,
 * announces the reset to CAM on success, then frees the TM command and
 * completes the CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		/*
		 * NOTE(review): DevHandle was stored with htole16() in
		 * mpssas_action_resetdev(), so this prints the raw
		 * little-endian value on big-endian hosts.
		 */
		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): resp is dereferenced without a NULL check, unlike
	 * e.g. mpssas_portenable_complete() which guards against a missing
	 * reply frame -- confirm a TM reply is always present here.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	/*
	 * "TM complete" means the target was actually reset; anything else
	 * is reported to CAM as an error.
	 * NOTE(review): ResponseCode appears to be a single-byte field in
	 * the MPI2 TM reply, in which case le32toh() is unnecessary (and
	 * wrong on big-endian) -- confirm against the MPI2 headers.
	 */
	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3141
3142static void
3143mpssas_poll(struct cam_sim *sim)
3144{
3145	struct mpssas_softc *sassc;
3146
3147	sassc = cam_sim_softc(sim);
3148
3149	if (sassc->sc->mps_debug & MPS_TRACE) {
3150		/* frequent debug messages during a panic just slow
3151		 * everything down too much.
3152		 */
3153		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3154		sassc->sc->mps_debug &= ~MPS_TRACE;
3155	}
3156
3157	mps_intr_locked(sassc->sc);
3158}
3159
/*
 * Asynchronous event callback registered with CAM.  Used to track EEDP
 * (end-to-end data protection) formatting per LUN:
 *
 * - On kernels with AC_ADVINFO_CHANGED, react to read-capacity data
 *   changes by caching whether the LUN is EEDP-formatted and its block
 *   size in the target's LUN list.
 * - On older kernels, probe each newly found device via
 *   mpssas_check_eedp() instead.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* The advinfo buffer type arrives as the callback argument. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing entry for this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not tracked yet; create an entry to hold the EEDP state. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data for this LUN via
		 * an XPT_DEV_ADVINFO request.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN is protection-enabled (EEDP) and,
		 * if so, its logical block length.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older kernels: probe the new device for EEDP support. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3264
3265#if (__FreeBSD_version < 901503) || \
3266    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3267static void
3268mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3269		  struct ccb_getdev *cgd)
3270{
3271	struct mpssas_softc *sassc = sc->sassc;
3272	struct ccb_scsiio *csio;
3273	struct scsi_read_capacity_16 *scsi_cmd;
3274	struct scsi_read_capacity_eedp *rcap_buf;
3275	path_id_t pathid;
3276	target_id_t targetid;
3277	lun_id_t lunid;
3278	union ccb *ccb;
3279	struct cam_path *local_path;
3280	struct mpssas_target *target;
3281	struct mpssas_lun *lun;
3282	uint8_t	found_lun;
3283	char path_str[64];
3284
3285	sassc = sc->sassc;
3286	pathid = cam_sim_path(sassc->sim);
3287	targetid = xpt_path_target_id(path);
3288	lunid = xpt_path_lun_id(path);
3289
3290	KASSERT(targetid < sassc->maxtargets,
3291	    ("Target %d out of bounds in mpssas_check_eedp\n",
3292	     targetid));
3293	target = &sassc->targets[targetid];
3294	if (target->handle == 0x0)
3295		return;
3296
3297	/*
3298	 * Determine if the device is EEDP capable.
3299	 *
3300	 * If this flag is set in the inquiry data,
3301	 * the device supports protection information,
3302	 * and must support the 16 byte read
3303	 * capacity command, otherwise continue without
3304	 * sending read cap 16
3305	 */
3306	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3307		return;
3308
3309	/*
3310	 * Issue a READ CAPACITY 16 command.  This info
3311	 * is used to determine if the LUN is formatted
3312	 * for EEDP support.
3313	 */
3314	ccb = xpt_alloc_ccb_nowait();
3315	if (ccb == NULL) {
3316		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3317		    "for EEDP support.\n");
3318		return;
3319	}
3320
3321	if (xpt_create_path(&local_path, xpt_periph,
3322	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3323		mps_dprint(sc, MPS_ERROR, "Unable to create "
3324		    "path for EEDP support\n");
3325		xpt_free_ccb(ccb);
3326		return;
3327	}
3328
3329	/*
3330	 * If LUN is already in list, don't create a new
3331	 * one.
3332	 */
3333	found_lun = FALSE;
3334	SLIST_FOREACH(lun, &target->luns, lun_link) {
3335		if (lun->lun_id == lunid) {
3336			found_lun = TRUE;
3337			break;
3338		}
3339	}
3340	if (!found_lun) {
3341		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3342		    M_NOWAIT | M_ZERO);
3343		if (lun == NULL) {
3344			mps_dprint(sc, MPS_ERROR,
3345			    "Unable to alloc LUN for EEDP support.\n");
3346			xpt_free_path(local_path);
3347			xpt_free_ccb(ccb);
3348			return;
3349		}
3350		lun->lun_id = lunid;
3351		SLIST_INSERT_HEAD(&target->luns, lun,
3352		    lun_link);
3353	}
3354
3355	xpt_path_string(local_path, path_str, sizeof(path_str));
3356
3357	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3358	    path_str, target->handle);
3359
3360	/*
3361	 * Issue a READ CAPACITY 16 command for the LUN.
3362	 * The mpssas_read_cap_done function will load
3363	 * the read cap info into the LUN struct.
3364	 */
3365	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3366	    M_MPT2, M_NOWAIT | M_ZERO);
3367	if (rcap_buf == NULL) {
3368		mps_dprint(sc, MPS_FAULT,
3369		    "Unable to alloc read capacity buffer for EEDP support.\n");
3370		xpt_free_path(ccb->ccb_h.path);
3371		xpt_free_ccb(ccb);
3372		return;
3373	}
3374	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3375	csio = &ccb->csio;
3376	csio->ccb_h.func_code = XPT_SCSI_IO;
3377	csio->ccb_h.flags = CAM_DIR_IN;
3378	csio->ccb_h.retry_count = 4;
3379	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3380	csio->ccb_h.timeout = 60000;
3381	csio->data_ptr = (uint8_t *)rcap_buf;
3382	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3383	csio->sense_len = MPS_SENSE_LEN;
3384	csio->cdb_len = sizeof(*scsi_cmd);
3385	csio->tag_action = MSG_SIMPLE_Q_TAG;
3386
3387	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3388	bzero(scsi_cmd, sizeof(*scsi_cmd));
3389	scsi_cmd->opcode = 0x9E;
3390	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3391	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3392
3393	ccb->ccb_h.ppriv_ptr1 = sassc;
3394	xpt_action(ccb);
3395}
3396
/*
 * Completion handler for the READ CAPACITY(16) command issued by
 * mpssas_check_eedp().  Parses the protection bit and block length out
 * of the returned data into the matching LUN entry, then frees the data
 * buffer, path, and CCB that mpssas_check_eedp() allocated.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself because this SCSI
	 * command was generated internally by the driver rather than by a
	 * CAM peripheral.  This is currently the only internally generated
	 * command; if more are added in the future, they will also need to
	 * release the devq here, since their completions never go back
	 * through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/*
		 * Protection bit set: the LUN is formatted for EEDP.
		 * NOTE(review): target_lun is a lun_id_t printed with %d;
		 * on kernels where lun_id_t is 64-bit this format specifier
		 * is mismatched -- confirm against the headers in use.
		 */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3467#endif /* (__FreeBSD_version < 901503) || \
3468          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3469
3470void
3471mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3472    struct mpssas_target *target, lun_id_t lun_id)
3473{
3474	union ccb *ccb;
3475	path_id_t path_id;
3476
3477	/*
3478	 * Set the INRESET flag for this target so that no I/O will be sent to
3479	 * the target until the reset has completed.  If an I/O request does
3480	 * happen, the devq will be frozen.  The CCB holds the path which is
3481	 * used to release the devq.  The devq is released and the CCB is freed
3482	 * when the TM completes.
3483	 */
3484	ccb = xpt_alloc_ccb_nowait();
3485	if (ccb) {
3486		path_id = cam_sim_path(sc->sassc->sim);
3487		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3488		    target->tid, lun_id) != CAM_REQ_CMP) {
3489			xpt_free_ccb(ccb);
3490		} else {
3491			tm->cm_ccb = ccb;
3492			tm->cm_targ = target;
3493			target->flags |= MPSSAS_TARGET_INRESET;
3494		}
3495	}
3496}
3497
3498int
3499mpssas_startup(struct mps_softc *sc)
3500{
3501
3502	/*
3503	 * Send the port enable message and set the wait_for_port_enable flag.
3504	 * This flag helps to keep the simq frozen until all discovery events
3505	 * are processed.
3506	 */
3507	sc->wait_for_port_enable = 1;
3508	mpssas_send_portenable(sc);
3509	return (0);
3510}
3511
3512static int
3513mpssas_send_portenable(struct mps_softc *sc)
3514{
3515	MPI2_PORT_ENABLE_REQUEST *request;
3516	struct mps_command *cm;
3517
3518	MPS_FUNCTRACE(sc);
3519
3520	if ((cm = mps_alloc_command(sc)) == NULL)
3521		return (EBUSY);
3522	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3523	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3524	request->MsgFlags = 0;
3525	request->VP_ID = 0;
3526	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3527	cm->cm_complete = mpssas_portenable_complete;
3528	cm->cm_data = NULL;
3529	cm->cm_sge = NULL;
3530
3531	mps_map_command(sc, cm);
3532	mps_dprint(sc, MPS_XINFO,
3533	    "mps_send_portenable finished cm %p req %p complete %p\n",
3534	    cm, cm->cm_req, cm->cm_complete);
3535	return (0);
3536}
3537
3538static void
3539mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3540{
3541	MPI2_PORT_ENABLE_REPLY *reply;
3542	struct mpssas_softc *sassc;
3543
3544	MPS_FUNCTRACE(sc);
3545	sassc = sc->sassc;
3546
3547	/*
3548	 * Currently there should be no way we can hit this case.  It only
3549	 * happens when we have a failure to allocate chain frames, and
3550	 * port enable commands don't have S/G lists.
3551	 */
3552	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3553		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3554			   "This should not happen!\n", __func__, cm->cm_flags);
3555	}
3556
3557	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3558	if (reply == NULL)
3559		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3560	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3561	    MPI2_IOCSTATUS_SUCCESS)
3562		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3563
3564	mps_free_command(sc, cm);
3565
3566	/*
3567	 * Get WarpDrive info after discovery is complete but before the scan
3568	 * starts.  At this point, all devices are ready to be exposed to the
3569	 * OS.  If devices should be hidden instead, take them out of the
3570	 * 'targets' array before the scan.  The devinfo for a disk will have
3571	 * some info and a volume's will be 0.  Use that to remove disks.
3572	 */
3573	mps_wd_config_pages(sc);
3574
3575	/*
3576	 * Done waiting for port enable to complete.  Decrement the refcount.
3577	 * If refcount is 0, discovery is complete and a rescan of the bus can
3578	 * take place.  Since the simq was explicitly frozen before port
3579	 * enable, it must be explicitly released here to keep the
3580	 * freeze/release count in sync.
3581	 */
3582	sc->wait_for_port_enable = 0;
3583	sc->port_enable_complete = 1;
3584	wakeup(&sc->port_enable_complete);
3585	mpssas_startup_decrement(sassc);
3586}
3587
3588int
3589mpssas_check_id(struct mpssas_softc *sassc, int id)
3590{
3591	struct mps_softc *sc = sassc->sc;
3592	char *ids;
3593	char *name;
3594
3595	ids = &sc->exclude_ids[0];
3596	while((name = strsep(&ids, ",")) != NULL) {
3597		if (name[0] == '\0')
3598			continue;
3599		if (strtol(name, NULL, 0) == (long)id)
3600			return (1);
3601	}
3602
3603	return (0);
3604}
3605
3606void
3607mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3608{
3609	struct mpssas_softc *sassc;
3610	struct mpssas_lun *lun, *lun_tmp;
3611	struct mpssas_target *targ;
3612	int i;
3613
3614	sassc = sc->sassc;
3615	/*
3616	 * The number of targets is based on IOC Facts, so free all of
3617	 * the allocated LUNs for each target and then the target buffer
3618	 * itself.
3619	 */
3620	for (i=0; i< maxtargets; i++) {
3621		targ = &sassc->targets[i];
3622		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3623			free(lun, M_MPT2);
3624		}
3625	}
3626	free(sassc->targets, M_MPT2);
3627
3628	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3629	    M_MPT2, M_WAITOK|M_ZERO);
3630	if (!sassc->targets) {
3631		panic("%s failed to alloc targets with error %d\n",
3632		    __func__, ENOMEM);
3633	}
3634}
3635