mps_sas.c revision 9af66cbda234ef1061db94471badefecc48db86c
1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * LSI MPT-Fusion Host Adapter FreeBSD
28 *
29 * $FreeBSD$
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35/* Communications core for LSI MPT2 */
36
37/* TODO Move headers to mpsvar */
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/selinfo.h>
43#include <sys/module.h>
44#include <sys/bus.h>
45#include <sys/conf.h>
46#include <sys/bio.h>
47#include <sys/malloc.h>
48#include <sys/uio.h>
49#include <sys/sysctl.h>
50#include <sys/endian.h>
51#include <sys/queue.h>
52#include <sys/kthread.h>
53#include <sys/taskqueue.h>
54#include <sys/sbuf.h>
55
56#include <machine/bus.h>
57#include <machine/resource.h>
58#include <sys/rman.h>
59
60#include <machine/stdarg.h>
61
62#include <cam/cam.h>
63#include <cam/cam_ccb.h>
64#include <cam/cam_xpt.h>
65#include <cam/cam_debug.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_xpt_periph.h>
69#include <cam/cam_periph.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72#if __FreeBSD_version >= 900026
73#include <cam/scsi/smp_all.h>
74#endif
75
76#include <dev/mps/mpi/mpi2_type.h>
77#include <dev/mps/mpi/mpi2.h>
78#include <dev/mps/mpi/mpi2_ioc.h>
79#include <dev/mps/mpi/mpi2_sas.h>
80#include <dev/mps/mpi/mpi2_cnfg.h>
81#include <dev/mps/mpi/mpi2_init.h>
82#include <dev/mps/mpi/mpi2_tool.h>
83#include <dev/mps/mps_ioctl.h>
84#include <dev/mps/mpsvar.h>
85#include <dev/mps/mps_table.h>
86#include <dev/mps/mps_sas.h>
87
88#define MPSSAS_DISCOVERY_TIMEOUT	20
89#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90
91/*
92 * static array to check SCSI OpCode for EEDP protection bits
93 */
94#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {	/* indexed by SCSI CDB opcode */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00-0x0f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10-0x1f */
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE AND VERIFY(10),
	 * 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30-0x3f */
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50-0x5f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60-0x6f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70-0x7f */
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE AND VERIFY(16),
	 * 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE AND VERIFY(12),
	 * 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0-0xbf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0-0xcf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0-0xdf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0-0xef */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0-0xff */
};
115
116MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117
118static void mpssas_discovery_timeout(void *data);
119static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122static void mpssas_poll(struct cam_sim *sim);
123static void mpssas_scsiio_timeout(void *data);
124static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126    struct mps_command *cm, union ccb *ccb);
127static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130#if __FreeBSD_version >= 900026
131static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133			       uint64_t sasaddr);
134static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135#endif //FreeBSD_version >= 900026
136static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139static void mpssas_async(void *callback_arg, uint32_t code,
140			 struct cam_path *path, void *arg);
141#if (__FreeBSD_version < 901503) || \
142    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144			      struct ccb_getdev *cgd);
145static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146#endif
147static int mpssas_send_portenable(struct mps_softc *sc);
148static void mpssas_portenable_complete(struct mps_softc *sc,
149    struct mps_command *cm);
150
151struct mpssas_target *
152mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153{
154	struct mpssas_target *target;
155	int i;
156
157	for (i = start; i < sassc->maxtargets; i++) {
158		target = &sassc->targets[i];
159		if (target->handle == handle)
160			return (target);
161	}
162
163	return (NULL);
164}
165
166/* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery.  Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
172 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/* Refcounting only matters while discovery is still in progress. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		/*
		 * First discovery-related action: freeze the SIM queue so
		 * no commands are failed before device handles exist, and
		 * (on newer CAM) hold off the boot process until discovery
		 * completes.
		 */
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192
193void
194mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195{
196	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198		xpt_release_simq(sassc->sim, 1);
199		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200	}
201}
202
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		/*
		 * Last outstanding discovery action just finished: leave
		 * startup mode, thaw the SIM queue, and either release the
		 * boot hold (newer CAM, which rescans on its own) or kick
		 * off a manual wildcard rescan on older CAM.
		 */
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
227
228/* LSI's firmware requires us to stop sending commands when we're doing task
229 * management, so refcount the TMs and keep the simq frozen when any are in
230 * use.
231 */
232struct mps_command *
233mpssas_alloc_tm(struct mps_softc *sc)
234{
235	struct mps_command *tm;
236
237	MPS_FUNCTRACE(sc);
238	tm = mps_alloc_high_priority_command(sc);
239	if (tm != NULL) {
240		if (sc->sassc->tm_count++ == 0) {
241			mps_dprint(sc, MPS_RECOVERY,
242			    "%s freezing simq\n", __func__);
243			xpt_freeze_simq(sc->sassc->sim, 1);
244		}
245		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
246		    sc->sassc->tm_count);
247	}
248	return tm;
249}
250
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	mps_dprint(sc, MPS_TRACE, "%s", __func__);
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	/* Return the command to the high-priority free pool last, after
	 * the refcount bookkeeping is complete. */
	mps_free_high_priority_command(sc, tm);
}
271
272void
273mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
274{
275	struct mpssas_softc *sassc = sc->sassc;
276	path_id_t pathid;
277	target_id_t targetid;
278	union ccb *ccb;
279
280	MPS_FUNCTRACE(sc);
281	pathid = cam_sim_path(sassc->sim);
282	if (targ == NULL)
283		targetid = CAM_TARGET_WILDCARD;
284	else
285		targetid = targ - sassc->targets;
286
287	/*
288	 * Allocate a CCB and schedule a rescan.
289	 */
290	ccb = xpt_alloc_ccb_nowait();
291	if (ccb == NULL) {
292		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
293		return;
294	}
295
296	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
297	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
299		xpt_free_ccb(ccb);
300		return;
301	}
302
303	if (targetid == CAM_TARGET_WILDCARD)
304		ccb->ccb_h.func_code = XPT_SCAN_BUS;
305	else
306		ccb->ccb_h.func_code = XPT_SCAN_TGT;
307
308	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
309	xpt_rescan(ccb);
310}
311
/*
 * Format and emit a debug message describing a command: the CAM path
 * (or a "noperiph" tuple when no CCB is attached), the CDB and transfer
 * length for SCSI I/O, the SMID, and the caller-supplied message.
 * Emits nothing unless 'level' is enabled in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size sbuf backed by 'str'; output is truncated if the
	 * formatted message exceeds the buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB attached: identify the command by sim/bus/target/lun
		 * instead of a CAM path. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
357
358
359static void
360mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
361{
362	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
363	struct mpssas_target *targ;
364	uint16_t handle;
365
366	MPS_FUNCTRACE(sc);
367
368	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
369	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
370	targ = tm->cm_targ;
371
372	if (reply == NULL) {
373		/* XXX retry the remove after the diag reset completes? */
374		mps_dprint(sc, MPS_FAULT,
375		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
376		mpssas_free_tm(sc, tm);
377		return;
378	}
379
380	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
381		mps_dprint(sc, MPS_FAULT,
382		   "IOCStatus = 0x%x while resetting device 0x%x\n",
383		   reply->IOCStatus, handle);
384		mpssas_free_tm(sc, tm);
385		return;
386	}
387
388	mps_dprint(sc, MPS_XINFO,
389	    "Reset aborted %u commands\n", reply->TerminationCount);
390	mps_free_reply(sc, tm->cm_reply_data);
391	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
392
393	mps_dprint(sc, MPS_XINFO,
394	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
395
396	/*
397	 * Don't clear target if remove fails because things will get confusing.
398	 * Leave the devname and sasaddr intact so that we know to avoid reusing
399	 * this target id if possible, and so we can assign the same target id
400	 * to this device if it comes back in the future.
401	 */
402	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
403		targ = tm->cm_targ;
404		targ->handle = 0x0;
405		targ->encl_handle = 0x0;
406		targ->encl_slot = 0x0;
407		targ->exp_dev_handle = 0x0;
408		targ->phy_num = 0x0;
409		targ->linkrate = 0x0;
410		targ->devinfo = 0x0;
411		targ->flags = 0x0;
412	}
413
414	mpssas_free_tm(sc, tm);
415}
416
417
418/*
419 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
420 * Otherwise Volume Delete is same as Bare Drive Removal.
421 */
422void
423mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
424{
425	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
426	struct mps_softc *sc;
427	struct mps_command *cm;
428	struct mpssas_target *targ = NULL;
429
430	MPS_FUNCTRACE(sassc->sc);
431	sc = sassc->sc;
432
433#ifdef WD_SUPPORT
434	/*
435	 * If this is a WD controller, determine if the disk should be exposed
436	 * to the OS or not.  If disk should be exposed, return from this
437	 * function without doing anything.
438	 */
439	if (sc->WD_available && (sc->WD_hide_expose ==
440	    MPS_WD_EXPOSE_ALWAYS)) {
441		return;
442	}
443#endif //WD_SUPPORT
444
445	targ = mpssas_find_target_by_handle(sassc, 0, handle);
446	if (targ == NULL) {
447		/* FIXME: what is the action? */
448		/* We don't know about this device? */
449		mps_dprint(sc, MPS_ERROR,
450		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
451		return;
452	}
453
454	targ->flags |= MPSSAS_TARGET_INREMOVAL;
455
456	cm = mpssas_alloc_tm(sc);
457	if (cm == NULL) {
458		mps_dprint(sc, MPS_ERROR,
459		    "%s: command alloc failure\n", __func__);
460		return;
461	}
462
463	mpssas_rescan_target(sc, targ);
464
465	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
466	req->DevHandle = targ->handle;
467	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
468	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
469
470	/* SAS Hard Link Reset / SATA Link Reset */
471	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
472
473	cm->cm_targ = targ;
474	cm->cm_data = NULL;
475	cm->cm_desc.HighPriority.RequestFlags =
476	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
477	cm->cm_complete = mpssas_remove_volume;
478	cm->cm_complete_data = (void *)(uintptr_t)handle;
479	mps_map_command(sc, cm);
480}
481
482/*
483 * The MPT2 firmware performs debounce on the link to avoid transient link
484 * errors and false removals.  When it does decide that link has been lost
485 * and a device need to go away, it expects that the host will perform a
486 * target reset and then an op remove.  The reset has the side-effect of
487 * aborting any outstanding requests for the device, which is required for
488 * the op-remove to succeed.  It's not clear if the host should check for
489 * the device coming back alive after the reset.
490 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	/* Mark the target so other paths know a removal is in flight. */
	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	/* Let CAM re-probe the (now departing) target. */
	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM; the completion handler follows up
	 * with the SAS IO unit control op-remove. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	/* Stash the handle (not a pointer) for the completion handler. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
539
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  If the reset succeeded, the same command
 * frame is reused to issue the SAS IO unit control REMOVE_DEVICE
 * operation, and any commands still queued on the target are completed
 * back to CAM with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before comparing. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	/*
	 * Complete any commands the reset left stranded on the target's
	 * queue.  NOTE: 'tm' is reused below as the loop iterator — the
	 * op-remove command above is already in flight, so the original
	 * reference is no longer needed.  For these queued commands,
	 * cm_complete_data holds the originating CCB.
	 */
	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
614
/*
 * Completion handler for the SAS IO unit control REMOVE_DEVICE
 * operation issued by mpssas_remove_device().  On success, wipes the
 * target's runtime state and frees its LUN list so the slot can be
 * matched again if the device returns.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free every LUN tracking structure hanging off the target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
681
682static int
683mpssas_register_events(struct mps_softc *sc)
684{
685	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
686
687	bzero(events, 16);
688	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
689	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
690	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
691	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
692	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
693	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
694	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
695	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
696	setbit(events, MPI2_EVENT_IR_VOLUME);
697	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
698	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
699	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
700
701	mps_register_events(sc, events, mpssas_evt_handler, NULL,
702	    &sc->sassc->mpssas_eh);
703
704	return (0);
705}
706
/*
 * Attach the SAS/CAM layer: allocate the per-SAS softc and target
 * array, create the SIM and its queue, start the event taskqueue,
 * register the SCSI bus with CAM, freeze the SIM queue until discovery
 * completes, and register for async events (for EEDP detection) and
 * firmware events.  Returns 0 on success or an errno; on failure,
 * partial state is torn down via mps_detach_sas().
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): with M_WAITOK, malloc(9) does not return NULL, so
	 * this check (and the one below) is dead code. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One queue slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	/* The remaining CAM registration must run under the driver lock. */
	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Newer CAM delivers AC_ADVINFO_CHANGED; older versions
		 * only provide AC_FOUND_DEVICE for this purpose. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
838
/*
 * Tear down the SAS/CAM layer in reverse order of attach: deregister
 * firmware events, drain the event taskqueue (lock NOT held, to avoid
 * deadlocking against running tasks), then under the lock deregister
 * the async handler, release any startup simq freeze, and destroy the
 * SIM/bus before freeing the LUN lists, target array, and softc.
 * Safe to call on a partially-attached instance.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If attach never finished discovery, the simq is still frozen. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free per-target LUN lists before releasing the target array. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
899
900void
901mpssas_discovery_end(struct mpssas_softc *sassc)
902{
903	struct mps_softc *sc = sassc->sc;
904
905	MPS_FUNCTRACE(sc);
906
907	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
908		callout_stop(&sassc->discovery_callout);
909
910}
911
/*
 * Watchdog fired while discovery was still pending — interrupts may
 * not be working.  Poll the hardware for events; if discovery is still
 * not done, re-arm the callout up to MPSSAS_MAX_DISCOVERY_TIMEOUTS
 * times before giving up and forcing discovery to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);

	mps_lock(sc);
	mps_dprint(sc, MPS_INFO,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_dprint(sassc->sc, MPS_INFO,
	    "Finished polling after discovery timeout at %d\n", ticks);

	/* Polling may have processed the end-of-discovery event. */
	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Re-arm the watchdog for another interval. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
951
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code.  Cases that complete synchronously fall through to xpt_done();
 * cases handed off to helpers (reset-dev, SCSI I/O, SMP I/O) return
 * directly and complete the CCB later.  Called with the driver mutex
 * held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and topology to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Claim the highest target id as the initiator's own. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report negotiated link rate and tagged-queueing state
		 * for the addressed target. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device at this target id. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link-rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the reset handler. */
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously when the firmware replies. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	/* Synchronous cases reach here with a status already set. */
	xpt_done(ccb);

}
1076
1077static void
1078mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1079    target_id_t target_id, lun_id_t lun_id)
1080{
1081	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1082	struct cam_path *path;
1083
1084	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1085	    ac_code, target_id, (uintmax_t)lun_id);
1086
1087	if (xpt_create_path(&path, NULL,
1088		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1089		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1090			   "notification\n");
1091		return;
1092	}
1093
1094	xpt_async(ac_code, path, NULL);
1095	xpt_free_path(path);
1096}
1097
1098static void
1099mpssas_complete_all_commands(struct mps_softc *sc)
1100{
1101	struct mps_command *cm;
1102	int i;
1103	int completed;
1104
1105	MPS_FUNCTRACE(sc);
1106	mtx_assert(&sc->mps_mtx, MA_OWNED);
1107
1108	/* complete all commands with a NULL reply */
1109	for (i = 1; i < sc->num_reqs; i++) {
1110		cm = &sc->commands[i];
1111		cm->cm_reply = NULL;
1112		completed = 0;
1113
1114		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1115			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1116
1117		if (cm->cm_complete != NULL) {
1118			mpssas_log_command(cm, MPS_RECOVERY,
1119			    "completing cm %p state %x ccb %p for diag reset\n",
1120			    cm, cm->cm_state, cm->cm_ccb);
1121
1122			cm->cm_complete(sc, cm);
1123			completed = 1;
1124		}
1125
1126		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1127			mpssas_log_command(cm, MPS_RECOVERY,
1128			    "waking up cm %p state %x ccb %p for diag reset\n",
1129			    cm, cm->cm_state, cm->cm_ccb);
1130			wakeup(cm);
1131			completed = 1;
1132		}
1133
1134		if (cm->cm_sc->io_cmds_active != 0) {
1135			cm->cm_sc->io_cmds_active--;
1136		} else {
1137			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
1138			    "io_cmds_active is out of sync - resynching to "
1139			    "0\n");
1140		}
1141
1142		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1143			/* this should never happen, but if it does, log */
1144			mpssas_log_command(cm, MPS_RECOVERY,
1145			    "cm %p state %x flags 0x%x ccb %p during diag "
1146			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1147			    cm->cm_ccb);
1148		}
1149	}
1150}
1151
1152void
1153mpssas_handle_reinit(struct mps_softc *sc)
1154{
1155	int i;
1156
1157	/* Go back into startup mode and freeze the simq, so that CAM
1158	 * doesn't send any commands until after we've rediscovered all
1159	 * targets and found the proper device handles for them.
1160	 *
1161	 * After the reset, portenable will trigger discovery, and after all
1162	 * discovery-related activities have finished, the simq will be
1163	 * released.
1164	 */
1165	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1166	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1167	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1168	mpssas_startup_increment(sc->sassc);
1169
1170	/* notify CAM of a bus reset */
1171	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1172	    CAM_LUN_WILDCARD);
1173
1174	/* complete and cleanup after all outstanding commands */
1175	mpssas_complete_all_commands(sc);
1176
1177	mps_dprint(sc, MPS_INIT,
1178	    "%s startup %u tm %u after command completion\n",
1179	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1180
1181	/* zero all the target handles, since they may change after the
1182	 * reset, and we have to rediscover all the targets and use the new
1183	 * handles.
1184	 */
1185	for (i = 0; i < sc->sassc->maxtargets; i++) {
1186		if (sc->sassc->targets[i].outstanding != 0)
1187			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1188			    i, sc->sassc->targets[i].outstanding);
1189		sc->sassc->targets[i].handle = 0x0;
1190		sc->sassc->targets[i].exp_dev_handle = 0x0;
1191		sc->sassc->targets[i].outstanding = 0;
1192		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1193	}
1194}
1195
1196static void
1197mpssas_tm_timeout(void *data)
1198{
1199	struct mps_command *tm = data;
1200	struct mps_softc *sc = tm->cm_sc;
1201
1202	mtx_assert(&sc->mps_mtx, MA_OWNED);
1203
1204	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1205	    "task mgmt %p timed out\n", tm);
1206	mps_reinit(sc);
1207}
1208
1209static void
1210mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1211{
1212	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1213	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1214	unsigned int cm_count = 0;
1215	struct mps_command *cm;
1216	struct mpssas_target *targ;
1217
1218	callout_stop(&tm->cm_callout);
1219
1220	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1221	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1222	targ = tm->cm_targ;
1223
1224	/*
1225	 * Currently there should be no way we can hit this case.  It only
1226	 * happens when we have a failure to allocate chain frames, and
1227	 * task management commands don't have S/G lists.
1228	 * XXXSL So should it be an assertion?
1229	 */
1230	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1231		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1232			   "This should not happen!\n", __func__, tm->cm_flags);
1233		mpssas_free_tm(sc, tm);
1234		return;
1235	}
1236
1237	if (reply == NULL) {
1238		mpssas_log_command(tm, MPS_RECOVERY,
1239		    "NULL reset reply for tm %p\n", tm);
1240		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1241			/* this completion was due to a reset, just cleanup */
1242			targ->flags &= ~MPSSAS_TARGET_INRESET;
1243			targ->tm = NULL;
1244			mpssas_free_tm(sc, tm);
1245		}
1246		else {
1247			/* we should have gotten a reply. */
1248			mps_reinit(sc);
1249		}
1250		return;
1251	}
1252
1253	mpssas_log_command(tm, MPS_RECOVERY,
1254	    "logical unit reset status 0x%x code 0x%x count %u\n",
1255	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1256	    le32toh(reply->TerminationCount));
1257
1258	/* See if there are any outstanding commands for this LUN.
1259	 * This could be made more efficient by using a per-LU data
1260	 * structure of some sort.
1261	 */
1262	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1263		if (cm->cm_lun == tm->cm_lun)
1264			cm_count++;
1265	}
1266
1267	if (cm_count == 0) {
1268		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1269		    "logical unit %u finished recovery after reset\n",
1270		    tm->cm_lun, tm);
1271
1272		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1273		    tm->cm_lun);
1274
1275		/* we've finished recovery for this logical unit.  check and
1276		 * see if some other logical unit has a timedout command
1277		 * that needs to be processed.
1278		 */
1279		cm = TAILQ_FIRST(&targ->timedout_commands);
1280		if (cm) {
1281			mpssas_send_abort(sc, tm, cm);
1282		}
1283		else {
1284			targ->tm = NULL;
1285			mpssas_free_tm(sc, tm);
1286		}
1287	}
1288	else {
1289		/* if we still have commands for this LUN, the reset
1290		 * effectively failed, regardless of the status reported.
1291		 * Escalate to a target reset.
1292		 */
1293		mpssas_log_command(tm, MPS_RECOVERY,
1294		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1295		    tm, cm_count);
1296		mpssas_send_reset(sc, tm,
1297		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1298	}
1299}
1300
/*
 * Completion handler for a TARGET_RESET task-management request.  If the
 * target has no outstanding commands left, recovery is complete: announce
 * AC_SENT_BDR for all LUNs and free the TM.  If commands remain, the
 * target reset effectively failed and we escalate to a full controller
 * reinit (the last rung on the recovery ladder).
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* The reset is done; the target is no longer "in reset". */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1373
1374#define MPS_RESET_TIMEOUT 30
1375
1376static int
1377mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1378{
1379	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1380	struct mpssas_target *target;
1381	int err;
1382
1383	target = tm->cm_targ;
1384	if (target->handle == 0) {
1385		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1386		    __func__, target->tid);
1387		return -1;
1388	}
1389
1390	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1391	req->DevHandle = htole16(target->handle);
1392	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1393	req->TaskType = type;
1394
1395	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1396		/* XXX Need to handle invalid LUNs */
1397		MPS_SET_LUN(req->LUN, tm->cm_lun);
1398		tm->cm_targ->logical_unit_resets++;
1399		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1400		    "sending logical unit reset\n");
1401		tm->cm_complete = mpssas_logical_unit_reset_complete;
1402	}
1403	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1404		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1405		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1406		tm->cm_targ->target_resets++;
1407		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1408		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1409		    "sending target reset\n");
1410		tm->cm_complete = mpssas_target_reset_complete;
1411	}
1412	else {
1413		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1414		return -1;
1415	}
1416
1417	tm->cm_data = NULL;
1418	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1419	tm->cm_complete_data = (void *)tm;
1420
1421	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1422	    mpssas_tm_timeout, tm);
1423
1424	err = mps_map_command(sc, tm);
1425	if (err)
1426		mpssas_log_command(tm, MPS_RECOVERY,
1427		    "error %d sending reset type %u\n",
1428		    err, type);
1429
1430	return err;
1431}
1432
1433
/*
 * Completion handler for an ABORT_TASK task-management request.  Decides
 * the next recovery step for the target: done (no timed-out commands
 * left), continue (abort the next timed-out command), or escalate to a
 * logical unit reset (the aborted command never completed).
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1515
1516#define MPS_ABORT_TIMEOUT 5
1517
1518static int
1519mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1520{
1521	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1522	struct mpssas_target *targ;
1523	int err;
1524
1525	targ = cm->cm_targ;
1526	if (targ->handle == 0) {
1527		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1528		    __func__, cm->cm_ccb->ccb_h.target_id);
1529		return -1;
1530	}
1531
1532	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1533	    "Aborting command %p\n", cm);
1534
1535	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1536	req->DevHandle = htole16(targ->handle);
1537	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1538	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1539
1540	/* XXX Need to handle invalid LUNs */
1541	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1542
1543	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1544
1545	tm->cm_data = NULL;
1546	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1547	tm->cm_complete = mpssas_abort_complete;
1548	tm->cm_complete_data = (void *)tm;
1549	tm->cm_targ = cm->cm_targ;
1550	tm->cm_lun = cm->cm_lun;
1551
1552	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1553	    mpssas_tm_timeout, tm);
1554
1555	targ->aborts++;
1556
1557	err = mps_map_command(sc, tm);
1558	if (err)
1559		mpssas_log_command(tm, MPS_RECOVERY,
1560		    "error %d sending abort for cm %p SMID %u\n",
1561		    err, cm, req->TaskMID);
1562	return err;
1563}
1564
1565
1566static void
1567mpssas_scsiio_timeout(void *data)
1568{
1569	struct mps_softc *sc;
1570	struct mps_command *cm;
1571	struct mpssas_target *targ;
1572
1573	cm = (struct mps_command *)data;
1574	sc = cm->cm_sc;
1575
1576	MPS_FUNCTRACE(sc);
1577	mtx_assert(&sc->mps_mtx, MA_OWNED);
1578
1579	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1580
1581	/*
1582	 * Run the interrupt handler to make sure it's not pending.  This
1583	 * isn't perfect because the command could have already completed
1584	 * and been re-used, though this is unlikely.
1585	 */
1586	mps_intr_locked(sc);
1587	if (cm->cm_state == MPS_CM_STATE_FREE) {
1588		mpssas_log_command(cm, MPS_XINFO,
1589		    "SCSI command %p almost timed out\n", cm);
1590		return;
1591	}
1592
1593	if (cm->cm_ccb == NULL) {
1594		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1595		return;
1596	}
1597
1598	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1599	    cm, cm->cm_ccb);
1600
1601	targ = cm->cm_targ;
1602	targ->timeouts++;
1603
1604	/* XXX first, check the firmware state, to see if it's still
1605	 * operational.  if not, do a diag reset.
1606	 */
1607	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1608	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1609	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1610
1611	if (targ->tm != NULL) {
1612		/* target already in recovery, just queue up another
1613		 * timedout command to be processed later.
1614		 */
1615		mps_dprint(sc, MPS_RECOVERY,
1616		    "queued timedout cm %p for processing by tm %p\n",
1617		    cm, targ->tm);
1618	}
1619	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1620		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1621		    cm, targ->tm);
1622
1623		/* start recovery by aborting the first timedout command */
1624		mpssas_send_abort(sc, targ->tm, cm);
1625	}
1626	else {
1627		/* XXX queue this target up for recovery once a TM becomes
1628		 * available.  The firmware only has a limited number of
1629		 * HighPriority credits for the high priority requests used
1630		 * for task management, and we ran out.
1631		 *
1632		 * Isilon: don't worry about this for now, since we have
1633		 * more credits than disks in an enclosure, and limit
1634		 * ourselves to one TM per target for recovery.
1635		 */
1636		mps_dprint(sc, MPS_RECOVERY,
1637		    "timedout cm %p failed to allocate a tm\n", cm);
1638	}
1639
1640}
1641
/*
 * Build and submit an MPI2 SCSI_IO request for an XPT_SCSI_IO CCB.
 * Validates the target, allocates a command frame, translates the CCB's
 * direction/tag/CDB into the MPI request (including EEDP/protection CDB
 * fixups and WD direct-drive translation), arms the per-command timeout,
 * and hands the command to the hardware.  Completion is asynchronous via
 * mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* handle == 0 means no device currently at this target id. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members cannot take direct SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If no command frame is available (or a diag reset is in flight),
	 * freeze the simq and ask CAM to requeue the CCB later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Translate the CCB into the MPI2 SCSI_IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra dwords beyond the base 16. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB in, whether CAM passed it by pointer or inline. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set above — redundant store. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN was found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 for 16-byte CDBs, 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT (001b) in CDB byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Attach the CCB as the data descriptor for the SG-list builder. */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		/*
		 * NOTE(review): the condition takes the direct-drive path
		 * when the field equals MPS_WD_RETRY, which reads as the
		 * opposite of the comment above — confirm against
		 * mpssas_direct_drive_io()'s handling of MPS_WD_RETRY.
		 */
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* CAM timeout is in milliseconds; callout wants ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1897
1898static void
1899mps_response_code(struct mps_softc *sc, u8 response_code)
1900{
1901        char *desc;
1902
1903        switch (response_code) {
1904        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1905                desc = "task management request completed";
1906                break;
1907        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1908                desc = "invalid frame";
1909                break;
1910        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1911                desc = "task management request not supported";
1912                break;
1913        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1914                desc = "task management request failed";
1915                break;
1916        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1917                desc = "task management request succeeded";
1918                break;
1919        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1920                desc = "invalid lun";
1921                break;
1922        case 0xA:
1923                desc = "overlapped tag attempted";
1924                break;
1925        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1926                desc = "task queued, however not sent to target";
1927                break;
1928        default:
1929                desc = "unknown";
1930                break;
1931        }
1932		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1933                response_code, desc);
1934}
1935/**
1936 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1937 */
1938static void
1939mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1940    Mpi2SCSIIOReply_t *mpi_reply)
1941{
1942	u32 response_info;
1943	u8 *response_bytes;
1944	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1945	    MPI2_IOCSTATUS_MASK;
1946	u8 scsi_state = mpi_reply->SCSIState;
1947	u8 scsi_status = mpi_reply->SCSIStatus;
1948	char *desc_ioc_state = NULL;
1949	char *desc_scsi_status = NULL;
1950	char *desc_scsi_state = sc->tmp_string;
1951	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1952
1953	if (log_info == 0x31170000)
1954		return;
1955
1956	switch (ioc_status) {
1957	case MPI2_IOCSTATUS_SUCCESS:
1958		desc_ioc_state = "success";
1959		break;
1960	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1961		desc_ioc_state = "invalid function";
1962		break;
1963	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1964		desc_ioc_state = "scsi recovered error";
1965		break;
1966	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1967		desc_ioc_state = "scsi invalid dev handle";
1968		break;
1969	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1970		desc_ioc_state = "scsi device not there";
1971		break;
1972	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1973		desc_ioc_state = "scsi data overrun";
1974		break;
1975	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1976		desc_ioc_state = "scsi data underrun";
1977		break;
1978	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1979		desc_ioc_state = "scsi io data error";
1980		break;
1981	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1982		desc_ioc_state = "scsi protocol error";
1983		break;
1984	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1985		desc_ioc_state = "scsi task terminated";
1986		break;
1987	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1988		desc_ioc_state = "scsi residual mismatch";
1989		break;
1990	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1991		desc_ioc_state = "scsi task mgmt failed";
1992		break;
1993	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1994		desc_ioc_state = "scsi ioc terminated";
1995		break;
1996	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1997		desc_ioc_state = "scsi ext terminated";
1998		break;
1999	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2000		desc_ioc_state = "eedp guard error";
2001		break;
2002	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2003		desc_ioc_state = "eedp ref tag error";
2004		break;
2005	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2006		desc_ioc_state = "eedp app tag error";
2007		break;
2008	default:
2009		desc_ioc_state = "unknown";
2010		break;
2011	}
2012
2013	switch (scsi_status) {
2014	case MPI2_SCSI_STATUS_GOOD:
2015		desc_scsi_status = "good";
2016		break;
2017	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2018		desc_scsi_status = "check condition";
2019		break;
2020	case MPI2_SCSI_STATUS_CONDITION_MET:
2021		desc_scsi_status = "condition met";
2022		break;
2023	case MPI2_SCSI_STATUS_BUSY:
2024		desc_scsi_status = "busy";
2025		break;
2026	case MPI2_SCSI_STATUS_INTERMEDIATE:
2027		desc_scsi_status = "intermediate";
2028		break;
2029	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2030		desc_scsi_status = "intermediate condmet";
2031		break;
2032	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2033		desc_scsi_status = "reservation conflict";
2034		break;
2035	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2036		desc_scsi_status = "command terminated";
2037		break;
2038	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2039		desc_scsi_status = "task set full";
2040		break;
2041	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2042		desc_scsi_status = "aca active";
2043		break;
2044	case MPI2_SCSI_STATUS_TASK_ABORTED:
2045		desc_scsi_status = "task aborted";
2046		break;
2047	default:
2048		desc_scsi_status = "unknown";
2049		break;
2050	}
2051
2052	desc_scsi_state[0] = '\0';
2053	if (!scsi_state)
2054		desc_scsi_state = " ";
2055	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2056		strcat(desc_scsi_state, "response info ");
2057	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2058		strcat(desc_scsi_state, "state terminated ");
2059	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2060		strcat(desc_scsi_state, "no status ");
2061	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2062		strcat(desc_scsi_state, "autosense failed ");
2063	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2064		strcat(desc_scsi_state, "autosense valid ");
2065
2066	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2067	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2068	/* We can add more detail about underflow data here
2069	 * TO-DO
2070	 * */
2071	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2072	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2073	    desc_scsi_state, scsi_state);
2074
2075	if (sc->mps_debug & MPS_XINFO &&
2076		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2077		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2078		scsi_sense_print(csio);
2079		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2080	}
2081
2082	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2083		response_info = le32toh(mpi_reply->ResponseInfo);
2084		response_bytes = (u8 *)&response_info;
2085		mps_response_code(sc,response_bytes[0]);
2086	}
2087}
2088
/*
 * Completion handler for XPT_SCSI_IO commands.  Translates the MPI2
 * SCSI_IO reply (if any) into a CAM status on the CCB, performs the
 * driver-internal bookkeeping (DMA sync/unload, target queues, SIM/dev
 * queue freeze handling, timeout-recovery logging, TLR negotiation via
 * INQUIRY VPD page 0 snooping), then frees the command and finishes the
 * CCB with xpt_done().  Called with the mps mutex held (asserted below).
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed, so its timeout callout is no longer needed. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* The command is no longer active on the target. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Recovery-path logging: timed-out commands, commands completing
	 * while a task management request is pending, and post-diag-reset
	 * completions are each reported at MPS_RECOVERY level. */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Map the masked IOCStatus from the reply onto a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)_
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Clamp the copied sense data to both what the IOC
			 * returned and what the CCB has room for. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* NOTE(review): this logging helper is invoked for every completed
	 * command that has a reply, including successful ones -- verify
	 * that is intended (it emits MPS_XINFO-level output only). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2459
/* All requests that reach here are endian-safe. */
2461static void
2462mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2463    union ccb *ccb) {
2464	pMpi2SCSIIORequest_t	pIO_req;
2465	struct mps_softc	*sc = sassc->sc;
2466	uint64_t		virtLBA;
2467	uint32_t		physLBA, stripe_offset, stripe_unit;
2468	uint32_t		io_size, column;
2469	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2470
2471	/*
2472	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2473	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2474	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2475	 * bit different than the 10/16 CDBs, handle them separately.
2476	 */
2477	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2478	CDB = pIO_req->CDB.CDB32;
2479
2480	/*
2481	 * Handle 6 byte CDBs.
2482	 */
2483	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2484	    (CDB[0] == WRITE_6))) {
2485		/*
2486		 * Get the transfer size in blocks.
2487		 */
2488		io_size = (cm->cm_length >> sc->DD_block_exponent);
2489
2490		/*
2491		 * Get virtual LBA given in the CDB.
2492		 */
2493		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2494		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2495
2496		/*
2497		 * Check that LBA range for I/O does not exceed volume's
2498		 * MaxLBA.
2499		 */
2500		if ((virtLBA + (uint64_t)io_size - 1) <=
2501		    sc->DD_max_lba) {
2502			/*
2503			 * Check if the I/O crosses a stripe boundary.  If not,
2504			 * translate the virtual LBA to a physical LBA and set
2505			 * the DevHandle for the PhysDisk to be used.  If it
2506			 * does cross a boundry, do normal I/O.  To get the
2507			 * right DevHandle to use, get the map number for the
2508			 * column, then use that map number to look up the
2509			 * DevHandle of the PhysDisk.
2510			 */
2511			stripe_offset = (uint32_t)virtLBA &
2512			    (sc->DD_stripe_size - 1);
2513			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2514				physLBA = (uint32_t)virtLBA >>
2515				    sc->DD_stripe_exponent;
2516				stripe_unit = physLBA / sc->DD_num_phys_disks;
2517				column = physLBA % sc->DD_num_phys_disks;
2518				pIO_req->DevHandle =
2519				    htole16(sc->DD_column_map[column].dev_handle);
2520				/* ???? Is this endian safe*/
2521				cm->cm_desc.SCSIIO.DevHandle =
2522				    pIO_req->DevHandle;
2523
2524				physLBA = (stripe_unit <<
2525				    sc->DD_stripe_exponent) + stripe_offset;
2526				ptrLBA = &pIO_req->CDB.CDB32[1];
2527				physLBA_byte = (uint8_t)(physLBA >> 16);
2528				*ptrLBA = physLBA_byte;
2529				ptrLBA = &pIO_req->CDB.CDB32[2];
2530				physLBA_byte = (uint8_t)(physLBA >> 8);
2531				*ptrLBA = physLBA_byte;
2532				ptrLBA = &pIO_req->CDB.CDB32[3];
2533				physLBA_byte = (uint8_t)physLBA;
2534				*ptrLBA = physLBA_byte;
2535
2536				/*
2537				 * Set flag that Direct Drive I/O is
2538				 * being done.
2539				 */
2540				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2541			}
2542		}
2543		return;
2544	}
2545
2546	/*
2547	 * Handle 10, 12 or 16 byte CDBs.
2548	 */
2549	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2550	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2551	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2552	    (CDB[0] == WRITE_12))) {
2553		/*
2554		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2555		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2556		 * the else section.  10-byte and 12-byte CDB's are OK.
2557		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2558		 * ready to accept 12byte CDB for Direct IOs.
2559		 */
2560		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2561		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2562		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2563			/*
2564			 * Get the transfer size in blocks.
2565			 */
2566			io_size = (cm->cm_length >> sc->DD_block_exponent);
2567
2568			/*
2569			 * Get virtual LBA.  Point to correct lower 4 bytes of
2570			 * LBA in the CDB depending on command.
2571			 */
2572			lba_idx = ((CDB[0] == READ_12) ||
2573				(CDB[0] == WRITE_12) ||
2574				(CDB[0] == READ_10) ||
2575				(CDB[0] == WRITE_10))? 2 : 6;
2576			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2577			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2578			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2579			    (uint64_t)CDB[lba_idx + 3];
2580
2581			/*
2582			 * Check that LBA range for I/O does not exceed volume's
2583			 * MaxLBA.
2584			 */
2585			if ((virtLBA + (uint64_t)io_size - 1) <=
2586			    sc->DD_max_lba) {
2587				/*
2588				 * Check if the I/O crosses a stripe boundary.
2589				 * If not, translate the virtual LBA to a
2590				 * physical LBA and set the DevHandle for the
2591				 * PhysDisk to be used.  If it does cross a
2592				 * boundry, do normal I/O.  To get the right
2593				 * DevHandle to use, get the map number for the
2594				 * column, then use that map number to look up
2595				 * the DevHandle of the PhysDisk.
2596				 */
2597				stripe_offset = (uint32_t)virtLBA &
2598				    (sc->DD_stripe_size - 1);
2599				if ((stripe_offset + io_size) <=
2600				    sc->DD_stripe_size) {
2601					physLBA = (uint32_t)virtLBA >>
2602					    sc->DD_stripe_exponent;
2603					stripe_unit = physLBA /
2604					    sc->DD_num_phys_disks;
2605					column = physLBA %
2606					    sc->DD_num_phys_disks;
2607					pIO_req->DevHandle =
2608					    htole16(sc->DD_column_map[column].
2609					    dev_handle);
2610					cm->cm_desc.SCSIIO.DevHandle =
2611					    pIO_req->DevHandle;
2612
2613					physLBA = (stripe_unit <<
2614					    sc->DD_stripe_exponent) +
2615					    stripe_offset;
2616					ptrLBA =
2617					    &pIO_req->CDB.CDB32[lba_idx];
2618					physLBA_byte = (uint8_t)(physLBA >> 24);
2619					*ptrLBA = physLBA_byte;
2620					ptrLBA =
2621					    &pIO_req->CDB.CDB32[lba_idx + 1];
2622					physLBA_byte = (uint8_t)(physLBA >> 16);
2623					*ptrLBA = physLBA_byte;
2624					ptrLBA =
2625					    &pIO_req->CDB.CDB32[lba_idx + 2];
2626					physLBA_byte = (uint8_t)(physLBA >> 8);
2627					*ptrLBA = physLBA_byte;
2628					ptrLBA =
2629					    &pIO_req->CDB.CDB32[lba_idx + 3];
2630					physLBA_byte = (uint8_t)physLBA;
2631					*ptrLBA = physLBA_byte;
2632
2633					/*
2634					 * Set flag that Direct Drive I/O is
2635					 * being done.
2636					 */
2637					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2638				}
2639			}
2640		} else {
2641			/*
2642			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2643			 * 0.  Get the transfer size in blocks.
2644			 */
2645			io_size = (cm->cm_length >> sc->DD_block_exponent);
2646
2647			/*
2648			 * Get virtual LBA.
2649			 */
2650			virtLBA = ((uint64_t)CDB[2] << 54) |
2651			    ((uint64_t)CDB[3] << 48) |
2652			    ((uint64_t)CDB[4] << 40) |
2653			    ((uint64_t)CDB[5] << 32) |
2654			    ((uint64_t)CDB[6] << 24) |
2655			    ((uint64_t)CDB[7] << 16) |
2656			    ((uint64_t)CDB[8] << 8) |
2657			    (uint64_t)CDB[9];
2658
2659			/*
2660			 * Check that LBA range for I/O does not exceed volume's
2661			 * MaxLBA.
2662			 */
2663			if ((virtLBA + (uint64_t)io_size - 1) <=
2664			    sc->DD_max_lba) {
2665				/*
2666				 * Check if the I/O crosses a stripe boundary.
2667				 * If not, translate the virtual LBA to a
2668				 * physical LBA and set the DevHandle for the
2669				 * PhysDisk to be used.  If it does cross a
2670				 * boundry, do normal I/O.  To get the right
2671				 * DevHandle to use, get the map number for the
2672				 * column, then use that map number to look up
2673				 * the DevHandle of the PhysDisk.
2674				 */
2675				stripe_offset = (uint32_t)virtLBA &
2676				    (sc->DD_stripe_size - 1);
2677				if ((stripe_offset + io_size) <=
2678				    sc->DD_stripe_size) {
2679					physLBA = (uint32_t)(virtLBA >>
2680					    sc->DD_stripe_exponent);
2681					stripe_unit = physLBA /
2682					    sc->DD_num_phys_disks;
2683					column = physLBA %
2684					    sc->DD_num_phys_disks;
2685					pIO_req->DevHandle =
2686					    htole16(sc->DD_column_map[column].
2687					    dev_handle);
2688					cm->cm_desc.SCSIIO.DevHandle =
2689					    pIO_req->DevHandle;
2690
2691					physLBA = (stripe_unit <<
2692					    sc->DD_stripe_exponent) +
2693					    stripe_offset;
2694
2695					/*
2696					 * Set upper 4 bytes of LBA to 0.  We
2697					 * assume that the phys disks are less
2698					 * than 2 TB's in size.  Then, set the
2699					 * lower 4 bytes.
2700					 */
2701					pIO_req->CDB.CDB32[2] = 0;
2702					pIO_req->CDB.CDB32[3] = 0;
2703					pIO_req->CDB.CDB32[4] = 0;
2704					pIO_req->CDB.CDB32[5] = 0;
2705					ptrLBA = &pIO_req->CDB.CDB32[6];
2706					physLBA_byte = (uint8_t)(physLBA >> 24);
2707					*ptrLBA = physLBA_byte;
2708					ptrLBA = &pIO_req->CDB.CDB32[7];
2709					physLBA_byte = (uint8_t)(physLBA >> 16);
2710					*ptrLBA = physLBA_byte;
2711					ptrLBA = &pIO_req->CDB.CDB32[8];
2712					physLBA_byte = (uint8_t)(physLBA >> 8);
2713					*ptrLBA = physLBA_byte;
2714					ptrLBA = &pIO_req->CDB.CDB32[9];
2715					physLBA_byte = (uint8_t)physLBA;
2716					*ptrLBA = physLBA_byte;
2717
2718					/*
2719					 * Set flag that Direct Drive I/O is
2720					 * being done.
2721					 */
2722					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2723				}
2724			}
2725		}
2726	}
2727}
2728
2729#if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands.  Validates the reply
 * (IOCStatus, SASStatus and the SMP function result byte), sets the CAM
 * status on the CCB accordingly, then syncs/unloads the DMA map, frees
 * the command and finishes the CCB with xpt_done() on every path.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian
	 * low/high halves in the original request (for logging only). */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2793
2794static void
2795mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2796{
2797	struct mps_command *cm;
2798	uint8_t *request, *response;
2799	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2800	struct mps_softc *sc;
2801	struct sglist *sg;
2802	int error;
2803
2804	sc = sassc->sc;
2805	sg = NULL;
2806	error = 0;
2807
2808	/*
2809	 * XXX We don't yet support physical addresses here.
2810	 */
2811	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2812	case CAM_DATA_PADDR:
2813	case CAM_DATA_SG_PADDR:
2814		mps_dprint(sc, MPS_ERROR,
2815			   "%s: physical addresses not supported\n", __func__);
2816		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2817		xpt_done(ccb);
2818		return;
2819	case CAM_DATA_SG:
2820		/*
2821		 * The chip does not support more than one buffer for the
2822		 * request or response.
2823		 */
2824	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2825		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2826			mps_dprint(sc, MPS_ERROR,
2827				   "%s: multiple request or response "
2828				   "buffer segments not supported for SMP\n",
2829				   __func__);
2830			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2831			xpt_done(ccb);
2832			return;
2833		}
2834
2835		/*
2836		 * The CAM_SCATTER_VALID flag was originally implemented
2837		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2838		 * We have two.  So, just take that flag to mean that we
2839		 * might have S/G lists, and look at the S/G segment count
2840		 * to figure out whether that is the case for each individual
2841		 * buffer.
2842		 */
2843		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2844			bus_dma_segment_t *req_sg;
2845
2846			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2847			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2848		} else
2849			request = ccb->smpio.smp_request;
2850
2851		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2852			bus_dma_segment_t *rsp_sg;
2853
2854			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2855			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2856		} else
2857			response = ccb->smpio.smp_response;
2858		break;
2859	case CAM_DATA_VADDR:
2860		request = ccb->smpio.smp_request;
2861		response = ccb->smpio.smp_response;
2862		break;
2863	default:
2864		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2865		xpt_done(ccb);
2866		return;
2867	}
2868
2869	cm = mps_alloc_command(sc);
2870	if (cm == NULL) {
2871		mps_dprint(sc, MPS_ERROR,
2872		    "%s: cannot allocate command\n", __func__);
2873		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2874		xpt_done(ccb);
2875		return;
2876	}
2877
2878	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2879	bzero(req, sizeof(*req));
2880	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2881
2882	/* Allow the chip to use any route to this SAS address. */
2883	req->PhysicalPort = 0xff;
2884
2885	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2886	req->SGLFlags =
2887	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2888
2889	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2890	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2891
2892	mpi_init_sge(cm, req, &req->SGL);
2893
2894	/*
2895	 * Set up a uio to pass into mps_map_command().  This allows us to
2896	 * do one map command, and one busdma call in there.
2897	 */
2898	cm->cm_uio.uio_iov = cm->cm_iovec;
2899	cm->cm_uio.uio_iovcnt = 2;
2900	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2901
2902	/*
2903	 * The read/write flag isn't used by busdma, but set it just in
2904	 * case.  This isn't exactly accurate, either, since we're going in
2905	 * both directions.
2906	 */
2907	cm->cm_uio.uio_rw = UIO_WRITE;
2908
2909	cm->cm_iovec[0].iov_base = request;
2910	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2911	cm->cm_iovec[1].iov_base = response;
2912	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2913
2914	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2915			       cm->cm_iovec[1].iov_len;
2916
2917	/*
2918	 * Trigger a warning message in mps_data_cb() for the user if we
2919	 * wind up exceeding two S/G segments.  The chip expects one
2920	 * segment for the request and another for the response.
2921	 */
2922	cm->cm_max_segs = 2;
2923
2924	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2925	cm->cm_complete = mpssas_smpio_complete;
2926	cm->cm_complete_data = ccb;
2927
2928	/*
2929	 * Tell the mapping code that we're using a uio, and that this is
2930	 * an SMP passthrough request.  There is a little special-case
2931	 * logic there (in mps_data_cb()) to handle the bidirectional
2932	 * transfer.
2933	 */
2934	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2935			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2936
2937	/* The chip data format is little endian. */
2938	req->SASAddress.High = htole32(sasaddr >> 32);
2939	req->SASAddress.Low = htole32(sasaddr);
2940
2941	/*
2942	 * XXX Note that we don't have a timeout/abort mechanism here.
2943	 * From the manual, it looks like task management requests only
2944	 * work for SCSI IO and SATA passthrough requests.  We may need to
2945	 * have a mechanism to retry requests in the event of a chip reset
2946	 * at least.  Hopefully the chip will insure that any errors short
2947	 * of that are relayed back to the driver.
2948	 */
2949	error = mps_map_command(sc, cm);
2950	if ((error != 0) && (error != EINPROGRESS)) {
2951		mps_dprint(sc, MPS_ERROR,
2952			   "%s: error %d returned from mps_map_command()\n",
2953			   __func__, error);
2954		goto bailout_error;
2955	}
2956
2957	return;
2958
2959bailout_error:
2960	mps_free_command(sc, cm);
2961	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2962	xpt_done(ccb);
2963	return;
2964
2965}
2966
/*
 * Handle an XPT_SMP_IO CCB: figure out the SAS address of the SMP
 * target (either the device itself, or the expander it is attached
 * to) and pass the request on to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * The parent must itself be an SMP target (i.e. an
		 * expander) with a valid SAS address, otherwise there is
		 * nowhere to route the request.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3102#endif //__FreeBSD_version >= 900026
3103
3104static void
3105mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3106{
3107	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3108	struct mps_softc *sc;
3109	struct mps_command *tm;
3110	struct mpssas_target *targ;
3111
3112	MPS_FUNCTRACE(sassc->sc);
3113	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3114
3115	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3116	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3117	     ccb->ccb_h.target_id));
3118	sc = sassc->sc;
3119	tm = mps_alloc_command(sc);
3120	if (tm == NULL) {
3121		mps_dprint(sc, MPS_ERROR,
3122		    "command alloc failure in mpssas_action_resetdev\n");
3123		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3124		xpt_done(ccb);
3125		return;
3126	}
3127
3128	targ = &sassc->targets[ccb->ccb_h.target_id];
3129	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3130	req->DevHandle = htole16(targ->handle);
3131	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3132	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3133
3134	/* SAS Hard Link Reset / SATA Link Reset */
3135	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3136
3137	tm->cm_data = NULL;
3138	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3139	tm->cm_complete = mpssas_resetdev_complete;
3140	tm->cm_complete_data = ccb;
3141	tm->cm_targ = targ;
3142	mps_map_command(sc, tm);
3143}
3144
3145static void
3146mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3147{
3148	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3149	union ccb *ccb;
3150
3151	MPS_FUNCTRACE(sc);
3152	mtx_assert(&sc->mps_mtx, MA_OWNED);
3153
3154	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3155	ccb = tm->cm_complete_data;
3156
3157	/*
3158	 * Currently there should be no way we can hit this case.  It only
3159	 * happens when we have a failure to allocate chain frames, and
3160	 * task management commands don't have S/G lists.
3161	 */
3162	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3163		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3164
3165		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3166
3167		mps_dprint(sc, MPS_ERROR,
3168			   "%s: cm_flags = %#x for reset of handle %#04x! "
3169			   "This should not happen!\n", __func__, tm->cm_flags,
3170			   req->DevHandle);
3171		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3172		goto bailout;
3173	}
3174
3175	mps_dprint(sc, MPS_XINFO,
3176	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3177	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3178
3179	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3180		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3181		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3182		    CAM_LUN_WILDCARD);
3183	}
3184	else
3185		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3186
3187bailout:
3188
3189	mpssas_free_tm(sc, tm);
3190	xpt_done(ccb);
3191}
3192
3193static void
3194mpssas_poll(struct cam_sim *sim)
3195{
3196	struct mpssas_softc *sassc;
3197
3198	sassc = cam_sim_softc(sim);
3199
3200	if (sassc->sc->mps_debug & MPS_TRACE) {
3201		/* frequent debug messages during a panic just slow
3202		 * everything down too much.
3203		 */
3204		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3205		sassc->sc->mps_debug &= ~MPS_TRACE;
3206	}
3207
3208	mps_intr_locked(sassc->sc);
3209}
3210
/*
 * CAM asynchronous event callback.  callback_arg is the controller's
 * mps_softc.  Depending on the FreeBSD version, the driver learns
 * about EEDP-formatted LUNs either from cached read-capacity
 * advanced-info changes (AC_ADVINFO_CHANGED) or by probing devices as
 * they are found (AC_FOUND_DEVICE, handled in mpssas_check_eedp()).
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* The advanced-info buffer type is passed in arg. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN entry on this target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create and link a new LUN entry. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data for this LUN
		 * via an XPT_DEV_ADVINFO request.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Release the device queue if the request froze it. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * SRC16_PROT_EN in the read capacity data means the LUN
		 * is formatted with protection information (EEDP).
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Probe the new device's EEDP capability directly. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3310
3311#if (__FreeBSD_version < 901503) || \
3312    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3313static void
3314mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3315		  struct ccb_getdev *cgd)
3316{
3317	struct mpssas_softc *sassc = sc->sassc;
3318	struct ccb_scsiio *csio;
3319	struct scsi_read_capacity_16 *scsi_cmd;
3320	struct scsi_read_capacity_eedp *rcap_buf;
3321	path_id_t pathid;
3322	target_id_t targetid;
3323	lun_id_t lunid;
3324	union ccb *ccb;
3325	struct cam_path *local_path;
3326	struct mpssas_target *target;
3327	struct mpssas_lun *lun;
3328	uint8_t	found_lun;
3329	char path_str[64];
3330
3331	sassc = sc->sassc;
3332	pathid = cam_sim_path(sassc->sim);
3333	targetid = xpt_path_target_id(path);
3334	lunid = xpt_path_lun_id(path);
3335
3336	KASSERT(targetid < sassc->maxtargets,
3337	    ("Target %d out of bounds in mpssas_check_eedp\n",
3338	     targetid));
3339	target = &sassc->targets[targetid];
3340	if (target->handle == 0x0)
3341		return;
3342
3343	/*
3344	 * Determine if the device is EEDP capable.
3345	 *
3346	 * If this flag is set in the inquiry data,
3347	 * the device supports protection information,
3348	 * and must support the 16 byte read
3349	 * capacity command, otherwise continue without
3350	 * sending read cap 16
3351	 */
3352	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3353		return;
3354
3355	/*
3356	 * Issue a READ CAPACITY 16 command.  This info
3357	 * is used to determine if the LUN is formatted
3358	 * for EEDP support.
3359	 */
3360	ccb = xpt_alloc_ccb_nowait();
3361	if (ccb == NULL) {
3362		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3363		    "for EEDP support.\n");
3364		return;
3365	}
3366
3367	if (xpt_create_path(&local_path, xpt_periph,
3368	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3369		mps_dprint(sc, MPS_ERROR, "Unable to create "
3370		    "path for EEDP support\n");
3371		xpt_free_ccb(ccb);
3372		return;
3373	}
3374
3375	/*
3376	 * If LUN is already in list, don't create a new
3377	 * one.
3378	 */
3379	found_lun = FALSE;
3380	SLIST_FOREACH(lun, &target->luns, lun_link) {
3381		if (lun->lun_id == lunid) {
3382			found_lun = TRUE;
3383			break;
3384		}
3385	}
3386	if (!found_lun) {
3387		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3388		    M_NOWAIT | M_ZERO);
3389		if (lun == NULL) {
3390			mps_dprint(sc, MPS_ERROR,
3391			    "Unable to alloc LUN for EEDP support.\n");
3392			xpt_free_path(local_path);
3393			xpt_free_ccb(ccb);
3394			return;
3395		}
3396		lun->lun_id = lunid;
3397		SLIST_INSERT_HEAD(&target->luns, lun,
3398		    lun_link);
3399	}
3400
3401	xpt_path_string(local_path, path_str, sizeof(path_str));
3402
3403	/*
3404	 * If this is a SATA direct-access end device,
3405	 * mark it so that a SCSI StartStopUnit command
3406	 * will be sent to it when the driver is being
3407	 * shutdown.
3408	 */
3409	if ((cgd.inq_data.device == T_DIRECT) &&
3410		(target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3411		((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3412		MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3413		lun->stop_at_shutdown = TRUE;
3414	}
3415
3416	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3417	    path_str, target->handle);
3418
3419	/*
3420	 * Issue a READ CAPACITY 16 command for the LUN.
3421	 * The mpssas_read_cap_done function will load
3422	 * the read cap info into the LUN struct.
3423	 */
3424	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3425	    M_MPT2, M_NOWAIT | M_ZERO);
3426	if (rcap_buf == NULL) {
3427		mps_dprint(sc, MPS_FAULT,
3428		    "Unable to alloc read capacity buffer for EEDP support.\n");
3429		xpt_free_path(ccb->ccb_h.path);
3430		xpt_free_ccb(ccb);
3431		return;
3432	}
3433	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3434	csio = &ccb->csio;
3435	csio->ccb_h.func_code = XPT_SCSI_IO;
3436	csio->ccb_h.flags = CAM_DIR_IN;
3437	csio->ccb_h.retry_count = 4;
3438	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3439	csio->ccb_h.timeout = 60000;
3440	csio->data_ptr = (uint8_t *)rcap_buf;
3441	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3442	csio->sense_len = MPS_SENSE_LEN;
3443	csio->cdb_len = sizeof(*scsi_cmd);
3444	csio->tag_action = MSG_SIMPLE_Q_TAG;
3445
3446	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3447	bzero(scsi_cmd, sizeof(*scsi_cmd));
3448	scsi_cmd->opcode = 0x9E;
3449	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3450	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3451
3452	ccb->ccb_h.ppriv_ptr1 = sassc;
3453	xpt_action(ccb);
3454}
3455
/*
 * Completion callback for the internally-generated READ CAPACITY(16)
 * issued by mpssas_check_eedp().  Records whether the LUN is formatted
 * with protection information (EEDP) and frees the CCB, path, and data
 * buffer allocated by the caller.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq here because this SCSI
	 * command was generated internally rather than by a peripheral
	 * driver.  Currently this is the only place where the driver
	 * issues a SCSI command internally; if more internal commands
	 * are added in the future, they will also need to release the
	 * devq themselves, since they do not go back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is the P_TYPE enable bit. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3526#endif /* (__FreeBSD_version < 901503) || \
3527          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3528
3529int
3530mpssas_startup(struct mps_softc *sc)
3531{
3532
3533	/*
3534	 * Send the port enable message and set the wait_for_port_enable flag.
3535	 * This flag helps to keep the simq frozen until all discovery events
3536	 * are processed.
3537	 */
3538	sc->wait_for_port_enable = 1;
3539	mpssas_send_portenable(sc);
3540	return (0);
3541}
3542
3543static int
3544mpssas_send_portenable(struct mps_softc *sc)
3545{
3546	MPI2_PORT_ENABLE_REQUEST *request;
3547	struct mps_command *cm;
3548
3549	MPS_FUNCTRACE(sc);
3550
3551	if ((cm = mps_alloc_command(sc)) == NULL)
3552		return (EBUSY);
3553	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3554	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3555	request->MsgFlags = 0;
3556	request->VP_ID = 0;
3557	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3558	cm->cm_complete = mpssas_portenable_complete;
3559	cm->cm_data = NULL;
3560	cm->cm_sge = NULL;
3561
3562	mps_map_command(sc, cm);
3563	mps_dprint(sc, MPS_XINFO,
3564	    "mps_send_portenable finished cm %p req %p complete %p\n",
3565	    cm, cm->cm_req, cm->cm_complete);
3566	return (0);
3567}
3568
3569static void
3570mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3571{
3572	MPI2_PORT_ENABLE_REPLY *reply;
3573	struct mpssas_softc *sassc;
3574
3575	MPS_FUNCTRACE(sc);
3576	sassc = sc->sassc;
3577
3578	/*
3579	 * Currently there should be no way we can hit this case.  It only
3580	 * happens when we have a failure to allocate chain frames, and
3581	 * port enable commands don't have S/G lists.
3582	 */
3583	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3584		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3585			   "This should not happen!\n", __func__, cm->cm_flags);
3586	}
3587
3588	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3589	if (reply == NULL)
3590		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3591	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3592	    MPI2_IOCSTATUS_SUCCESS)
3593		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3594
3595	mps_free_command(sc, cm);
3596	if (sc->mps_ich.ich_arg != NULL) {
3597		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3598		config_intrhook_disestablish(&sc->mps_ich);
3599		sc->mps_ich.ich_arg = NULL;
3600	}
3601
3602	/*
3603	 * Get WarpDrive info after discovery is complete but before the scan
3604	 * starts.  At this point, all devices are ready to be exposed to the
3605	 * OS.  If devices should be hidden instead, take them out of the
3606	 * 'targets' array before the scan.  The devinfo for a disk will have
3607	 * some info and a volume's will be 0.  Use that to remove disks.
3608	 */
3609	mps_wd_config_pages(sc);
3610
3611	/*
3612	 * Done waiting for port enable to complete.  Decrement the refcount.
3613	 * If refcount is 0, discovery is complete and a rescan of the bus can
3614	 * take place.  Since the simq was explicitly frozen before port
3615	 * enable, it must be explicitly released here to keep the
3616	 * freeze/release count in sync.
3617	 */
3618	sc->wait_for_port_enable = 0;
3619	sc->port_enable_complete = 1;
3620	wakeup(&sc->port_enable_complete);
3621	mpssas_startup_decrement(sassc);
3622}
3623
3624int
3625mpssas_check_id(struct mpssas_softc *sassc, int id)
3626{
3627	struct mps_softc *sc = sassc->sc;
3628	char *ids;
3629	char *name;
3630
3631	ids = &sc->exclude_ids[0];
3632	while((name = strsep(&ids, ",")) != NULL) {
3633		if (name[0] == '\0')
3634			continue;
3635		if (strtol(name, NULL, 0) == (long)id)
3636			return (1);
3637	}
3638
3639	return (0);
3640}
3641
3642void
3643mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3644{
3645	struct mpssas_softc *sassc;
3646	struct mpssas_lun *lun, *lun_tmp;
3647	struct mpssas_target *targ;
3648	int i;
3649
3650	sassc = sc->sassc;
3651	/*
3652	 * The number of targets is based on IOC Facts, so free all of
3653	 * the allocated LUNs for each target and then the target buffer
3654	 * itself.
3655	 */
3656	for (i=0; i< maxtargets; i++) {
3657		targ = &sassc->targets[i];
3658		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3659			free(lun, M_MPT2);
3660		}
3661	}
3662	free(sassc->targets, M_MPT2);
3663
3664	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3665	    M_MPT2, M_WAITOK|M_ZERO);
3666	if (!sassc->targets) {
3667		panic("%s failed to alloc targets with error %d\n",
3668		    __func__, ENOMEM);
3669	}
3670}
3671