mps_sas.c revision 71b805798bf6d7e4be7d593becfe3bcd5c3bdbf6
1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * LSI MPT-Fusion Host Adapter FreeBSD
28 *
29 * $FreeBSD$
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35/* Communications core for LSI MPT2 */
36
37/* TODO Move headers to mpsvar */
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/selinfo.h>
43#include <sys/module.h>
44#include <sys/bus.h>
45#include <sys/conf.h>
46#include <sys/bio.h>
47#include <sys/malloc.h>
48#include <sys/uio.h>
49#include <sys/sysctl.h>
50#include <sys/endian.h>
51#include <sys/queue.h>
52#include <sys/kthread.h>
53#include <sys/taskqueue.h>
54#include <sys/sbuf.h>
55
56#include <machine/bus.h>
57#include <machine/resource.h>
58#include <sys/rman.h>
59
60#include <machine/stdarg.h>
61
62#include <cam/cam.h>
63#include <cam/cam_ccb.h>
64#include <cam/cam_xpt.h>
65#include <cam/cam_debug.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_xpt_periph.h>
69#include <cam/cam_periph.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72#if __FreeBSD_version >= 900026
73#include <cam/scsi/smp_all.h>
74#endif
75
76#include <dev/mps/mpi/mpi2_type.h>
77#include <dev/mps/mpi/mpi2.h>
78#include <dev/mps/mpi/mpi2_ioc.h>
79#include <dev/mps/mpi/mpi2_sas.h>
80#include <dev/mps/mpi/mpi2_cnfg.h>
81#include <dev/mps/mpi/mpi2_init.h>
82#include <dev/mps/mpi/mpi2_tool.h>
83#include <dev/mps/mps_ioctl.h>
84#include <dev/mps/mpsvar.h>
85#include <dev/mps/mps_table.h>
86#include <dev/mps/mps_sas.h>
87
88#define MPSSAS_DISCOVERY_TIMEOUT	20
89#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90
/*
 * Static lookup table, indexed by SCSI opcode (0x00-0xFF), giving the EEDP
 * (End-to-End Data Protection) flag bits to set in the MPI2 SCSI IO request
 * for that opcode.  Opcodes with no protection handling map to 0.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00-0x0f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10-0x1f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x20-0x2f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30-0x3f */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x40-0x4f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50-0x5f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60-0x6f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70-0x7f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x80-0x8f */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x90-0x9f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0xa0-0xaf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0-0xbf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0-0xcf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0-0xdf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0-0xef */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0-0xff */
};
115
116MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117
118static void mpssas_discovery_timeout(void *data);
119static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122static void mpssas_poll(struct cam_sim *sim);
123static void mpssas_scsiio_timeout(void *data);
124static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126    struct mps_command *cm, union ccb *ccb);
127static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130#if __FreeBSD_version >= 900026
131static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133			       uint64_t sasaddr);
134static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135#endif //FreeBSD_version >= 900026
136static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139static void mpssas_async(void *callback_arg, uint32_t code,
140			 struct cam_path *path, void *arg);
141#if (__FreeBSD_version < 901503) || \
142    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144			      struct ccb_getdev *cgd);
145static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146#endif
147static int mpssas_send_portenable(struct mps_softc *sc);
148static void mpssas_portenable_complete(struct mps_softc *sc,
149    struct mps_command *cm);
150
151struct mpssas_target *
152mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153{
154	struct mpssas_target *target;
155	int i;
156
157	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
158		target = &sassc->targets[i];
159		if (target->handle == handle)
160			return (target);
161	}
162
163	return (NULL);
164}
165
166/* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery.  Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
172 */
173void
174mpssas_startup_increment(struct mpssas_softc *sassc)
175{
176	MPS_FUNCTRACE(sassc->sc);
177
178	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179		if (sassc->startup_refcount++ == 0) {
180			/* just starting, freeze the simq */
181			mps_dprint(sassc->sc, MPS_INIT,
182			    "%s freezing simq\n", __func__);
183			xpt_freeze_simq(sassc->sim, 1);
184		}
185		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
186		    sassc->startup_refcount);
187	}
188}
189
190void
191mpssas_startup_decrement(struct mpssas_softc *sassc)
192{
193	MPS_FUNCTRACE(sassc->sc);
194
195	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
196		if (--sassc->startup_refcount == 0) {
197			/* finished all discovery-related actions, release
198			 * the simq and rescan for the latest topology.
199			 */
200			mps_dprint(sassc->sc, MPS_INIT,
201			    "%s releasing simq\n", __func__);
202			sassc->flags &= ~MPSSAS_IN_STARTUP;
203#if __FreeBSD_version >= 1000039
204			xpt_release_boot();
205#else
206			xpt_release_simq(sassc->sim, 1);
207			mpssas_rescan_target(sassc->sc, NULL);
208#endif
209		}
210		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
211		    sassc->startup_refcount);
212	}
213}
214
215/* LSI's firmware requires us to stop sending commands when we're doing task
216 * management, so refcount the TMs and keep the simq frozen when any are in
217 * use.
218 */
219struct mps_command *
220mpssas_alloc_tm(struct mps_softc *sc)
221{
222	struct mps_command *tm;
223
224	MPS_FUNCTRACE(sc);
225	tm = mps_alloc_high_priority_command(sc);
226	if (tm != NULL) {
227		if (sc->sassc->tm_count++ == 0) {
228			mps_dprint(sc, MPS_RECOVERY,
229			    "%s freezing simq\n", __func__);
230			xpt_freeze_simq(sc->sassc->sim, 1);
231		}
232		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
233		    sc->sassc->tm_count);
234	}
235	return tm;
236}
237
238void
239mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
240{
241	mps_dprint(sc, MPS_TRACE, "%s", __func__);
242	if (tm == NULL)
243		return;
244
245	/* if there are no TMs in use, we can release the simq.  We use our
246	 * own refcount so that it's easier for a diag reset to cleanup and
247	 * release the simq.
248	 */
249	if (--sc->sassc->tm_count == 0) {
250		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
251		xpt_release_simq(sc->sassc->sim, 1);
252	}
253	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
254	    sc->sassc->tm_count);
255
256	mps_free_high_priority_command(sc, tm);
257}
258
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the whole
 * bus when 'targ' is NULL.  Failures are logged and swallowed; the rescan
 * is best-effort.
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target id is the index into the contiguous target array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means scan the entire bus. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and path. */
	xpt_rescan(ccb);
}
298
/*
 * Log a printf-style message for a command, prefixed with the command's CAM
 * path (or a synthesized "noperiph" sim identification when no CCB is
 * attached) and its request SMID.  Output goes through mps_dprint_field()
 * at the given debug level.  A NULL 'cm' is silently ignored.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Fixed-size sbuf backed by the on-stack buffer 'str'. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, also log the CDB and transfer length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB attached; identify the command by sim/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
340
341
342static void
343mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
344{
345	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
346	struct mpssas_target *targ;
347	uint16_t handle;
348
349	MPS_FUNCTRACE(sc);
350
351	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
352	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
353	targ = tm->cm_targ;
354
355	if (reply == NULL) {
356		/* XXX retry the remove after the diag reset completes? */
357		mps_dprint(sc, MPS_FAULT,
358		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
359		mpssas_free_tm(sc, tm);
360		return;
361	}
362
363	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
364		mps_dprint(sc, MPS_FAULT,
365		   "IOCStatus = 0x%x while resetting device 0x%x\n",
366		   reply->IOCStatus, handle);
367		mpssas_free_tm(sc, tm);
368		return;
369	}
370
371	mps_dprint(sc, MPS_XINFO,
372	    "Reset aborted %u commands\n", reply->TerminationCount);
373	mps_free_reply(sc, tm->cm_reply_data);
374	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
375
376	mps_dprint(sc, MPS_XINFO,
377	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
378
379	/*
380	 * Don't clear target if remove fails because things will get confusing.
381	 * Leave the devname and sasaddr intact so that we know to avoid reusing
382	 * this target id if possible, and so we can assign the same target id
383	 * to this device if it comes back in the future.
384	 */
385	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
386		targ = tm->cm_targ;
387		targ->handle = 0x0;
388		targ->encl_handle = 0x0;
389		targ->encl_slot = 0x0;
390		targ->exp_dev_handle = 0x0;
391		targ->phy_num = 0x0;
392		targ->linkrate = 0x0;
393		targ->devinfo = 0x0;
394		targ->flags = 0x0;
395	}
396
397	mpssas_free_tm(sc, tm);
398}
399
400
401/*
402 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
403 * Otherwise Volume Delete is same as Bare Drive Removal.
404 */
405void
406mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
407{
408	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
409	struct mps_softc *sc;
410	struct mps_command *cm;
411	struct mpssas_target *targ = NULL;
412
413	MPS_FUNCTRACE(sassc->sc);
414	sc = sassc->sc;
415
416#ifdef WD_SUPPORT
417	/*
418	 * If this is a WD controller, determine if the disk should be exposed
419	 * to the OS or not.  If disk should be exposed, return from this
420	 * function without doing anything.
421	 */
422	if (sc->WD_available && (sc->WD_hide_expose ==
423	    MPS_WD_EXPOSE_ALWAYS)) {
424		return;
425	}
426#endif //WD_SUPPORT
427
428	targ = mpssas_find_target_by_handle(sassc, 0, handle);
429	if (targ == NULL) {
430		/* FIXME: what is the action? */
431		/* We don't know about this device? */
432		mps_dprint(sc, MPS_ERROR,
433		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
434		return;
435	}
436
437	targ->flags |= MPSSAS_TARGET_INREMOVAL;
438
439	cm = mpssas_alloc_tm(sc);
440	if (cm == NULL) {
441		mps_dprint(sc, MPS_ERROR,
442		    "%s: command alloc failure\n", __func__);
443		return;
444	}
445
446	mpssas_rescan_target(sc, targ);
447
448	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
449	req->DevHandle = targ->handle;
450	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
451	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
452
453	/* SAS Hard Link Reset / SATA Link Reset */
454	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
455
456	cm->cm_targ = targ;
457	cm->cm_data = NULL;
458	cm->cm_desc.HighPriority.RequestFlags =
459	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
460	cm->cm_complete = mpssas_remove_volume;
461	cm->cm_complete_data = (void *)(uintptr_t)handle;
462	mps_map_command(sc, cm);
463}
464
/*
 * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that link has been lost
 * and a device need to go away, it expects that the host will perform a
 * target reset and then an op remove.  The reset has the side-effect of
 * aborting any outstanding requests for the device, which is required for
 * the op-remove to succeed.  It's not clear if the host should check for
 * the device coming back alive after the reset.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	/* Mark the target so new I/O can be rejected while it goes away. */
	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TM allocation freezes the simq until mpssas_free_tm() runs. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM; the op-remove follows in the
	 * completion handler (mpssas_remove_device). */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	/* Stash the handle (not a pointer) for the completion handler. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
522
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  On success, the same command is reused to send
 * the MPI2_SAS_OP_REMOVE_DEVICE op-remove, and any commands still queued
 * on the target are completed back to CAM with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian on the wire. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Free the reply frame now; clear cm_reply so the normal command
	 * completion path won't try to free it again after reuse. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/* Complete every command still queued on the target.  Note: 'tm'
	 * is reused here as the loop iterator; the TM itself was already
	 * handed back to the hardware above. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		/* For SCSI I/O commands, cm_complete_data holds the CCB. */
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
597
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE op-remove issued by
 * mpssas_remove_device().  On success, clears the target's topology state
 * and frees its LUN list so the slot can be reused.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release all LUN records attached to this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
664
665static int
666mpssas_register_events(struct mps_softc *sc)
667{
668	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
669
670	bzero(events, 16);
671	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
672	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
673	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
674	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
675	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
676	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
677	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
678	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
679	setbit(events, MPI2_EVENT_IR_VOLUME);
680	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
681	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
682	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
683
684	mps_register_events(sc, events, mpssas_evt_handler, NULL,
685	    &sc->sassc->mpssas_eh);
686
687	return (0);
688}
689
690int
691mps_attach_sas(struct mps_softc *sc)
692{
693	struct mpssas_softc *sassc;
694	cam_status status;
695	int unit, error = 0;
696
697	MPS_FUNCTRACE(sc);
698
699	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
700	if(!sassc) {
701		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
702		__func__, __LINE__);
703		return (ENOMEM);
704	}
705	sassc->targets = malloc(sizeof(struct mpssas_target) *
706	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
707	if(!sassc->targets) {
708		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
709		__func__, __LINE__);
710		free(sassc, M_MPT2);
711		return (ENOMEM);
712	}
713	sc->sassc = sassc;
714	sassc->sc = sc;
715
716	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
717		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
718		error = ENOMEM;
719		goto out;
720	}
721
722	unit = device_get_unit(sc->mps_dev);
723	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
724	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
725	if (sassc->sim == NULL) {
726		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
727		error = EINVAL;
728		goto out;
729	}
730
731	TAILQ_INIT(&sassc->ev_queue);
732
733	/* Initialize taskqueue for Event Handling */
734	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
735	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
736	    taskqueue_thread_enqueue, &sassc->ev_tq);
737
738	/* Run the task queue with lowest priority */
739	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
740	    device_get_nameunit(sc->mps_dev));
741
742	mps_lock(sc);
743
744	/*
745	 * XXX There should be a bus for every port on the adapter, but since
746	 * we're just going to fake the topology for now, we'll pretend that
747	 * everything is just a target on a single bus.
748	 */
749	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
750		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
751		    error);
752		mps_unlock(sc);
753		goto out;
754	}
755
756	/*
757	 * Assume that discovery events will start right away.
758	 *
759	 * Hold off boot until discovery is complete.
760	 */
761	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
762#if __FreeBSD_version >= 1000039
763	xpt_hold_boot();
764#else
765	xpt_freeze_simq(sassc->sim, 1);
766#endif
767	sc->sassc->startup_refcount = 0;
768
769	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
770	sassc->discovery_timeouts = 0;
771
772	sassc->tm_count = 0;
773
774	/*
775	 * Register for async events so we can determine the EEDP
776	 * capabilities of devices.
777	 */
778	status = xpt_create_path(&sassc->path, /*periph*/NULL,
779	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
780	    CAM_LUN_WILDCARD);
781	if (status != CAM_REQ_CMP) {
782		mps_printf(sc, "Error %#x creating sim path\n", status);
783		sassc->path = NULL;
784	} else {
785		int event;
786
787#if (__FreeBSD_version >= 1000006) || \
788    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
789		event = AC_ADVINFO_CHANGED;
790#else
791		event = AC_FOUND_DEVICE;
792#endif
793		status = xpt_register_async(event, mpssas_async, sc,
794					    sassc->path);
795		if (status != CAM_REQ_CMP) {
796			mps_dprint(sc, MPS_ERROR,
797			    "Error %#x registering async handler for "
798			    "AC_ADVINFO_CHANGED events\n", status);
799			xpt_free_path(sassc->path);
800			sassc->path = NULL;
801		}
802	}
803	if (status != CAM_REQ_CMP) {
804		/*
805		 * EEDP use is the exception, not the rule.
806		 * Warn the user, but do not fail to attach.
807		 */
808		mps_printf(sc, "EEDP capabilities disabled.\n");
809	}
810
811	mps_unlock(sc);
812
813	mpssas_register_events(sc);
814out:
815	if (error)
816		mps_detach_sas(sc);
817	return (error);
818}
819
/*
 * Tear down everything mps_attach_sas() set up: event handlers, the event
 * taskqueue, the async handler/path, the CAM sim/bus/devq, and finally the
 * target array and softc.  Safe to call on a partially-attached state (it
 * is also used as the error-unwind path of mps_attach_sas()).
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Registering for zero events deregisters the callback. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Undo the startup freeze if discovery never completed. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the per-target LUN lists before the target array itself. */
	for(i=0; i< sc->facts->MaxTargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
880
881void
882mpssas_discovery_end(struct mpssas_softc *sassc)
883{
884	struct mps_softc *sc = sassc->sc;
885
886	MPS_FUNCTRACE(sc);
887
888	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
889		callout_stop(&sassc->discovery_callout);
890
891}
892
/*
 * Watchdog callout that fires when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds.  Polls the hardware in case interrupts
 * are broken, then either finishes discovery, re-arms itself (up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS times), or gives up and forces discovery
 * to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);

	mps_lock(sc);
	mps_dprint(sc, MPS_INFO,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_dprint(sassc->sc, MPS_INFO,
	    "Finished polling after discovery timeout at %d\n", ticks);

	/* The poll above may have processed the end-of-discovery event and
	 * cleared MPSSAS_IN_DISCOVERY. */
	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
932
/*
 * Main CAM action entry point for the sim.  Dispatches on the CCB function
 * code; CCBs handled synchronously fall through to xpt_done() at the
 * bottom, while asynchronous paths (SCSI I/O, SMP I/O, device reset)
 * return directly and complete the CCB later.  Called with the softc
 * mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the HBA's capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 255;
		/* The initiator occupies the highest target id. */
		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (SAS link rate). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device at this target id. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the firmware link-rate code to kbit/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the reset TM handler. */
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by this hardware; pretend they succeeded. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by mpssas_scsiio_complete(). */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
1054
1055static void
1056mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1057    target_id_t target_id, lun_id_t lun_id)
1058{
1059	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1060	struct cam_path *path;
1061
1062	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1063	    ac_code, target_id, lun_id);
1064
1065	if (xpt_create_path(&path, NULL,
1066		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1067		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1068			   "notification\n");
1069		return;
1070	}
1071
1072	xpt_async(ac_code, path, NULL);
1073	xpt_free_path(path);
1074}
1075
/*
 * Force-complete every in-use command slot after a diag reset.  Each
 * command is completed (or its sleeper woken) with a NULL reply so that
 * nothing remains blocked on a request the re-initialized controller will
 * never answer.  Caller must hold the softc mutex.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): slot 0 is skipped — presumably reserved (SMID 0
	 * is not a valid request identifier); confirm against mps(4) init. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		/* NULL reply marks this as a forced completion, not a real
		 * answer from the controller. */
		cm->cm_reply = NULL;
		completed = 0;

		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1121
/*
 * Re-initialize SAS-layer state after a controller diag reset: re-enter
 * startup/discovery mode with the simq frozen, tell CAM the bus was reset,
 * force-complete all outstanding commands, and invalidate every cached
 * device handle so rediscovery (triggered later by port enable) can
 * install the new ones.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sc->sassc->sim, 1);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/*
	 * The simq was explicitly frozen above, so set the refcount to 0.
	 * The simq will be explicitly released after port enable completes.
	 */
	sc->sassc->startup_refcount = 0;

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		/* Any target still showing outstanding I/O here indicates an
		 * accounting leak; the counter is reset below regardless. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* NOTE(review): flags is overwritten (not OR'd), so any prior
		 * per-target flags are dropped here — presumably intentional
		 * since everything is rediscovered; confirm. */
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1171
1172static void
1173mpssas_tm_timeout(void *data)
1174{
1175	struct mps_command *tm = data;
1176	struct mps_softc *sc = tm->cm_sc;
1177
1178	mtx_assert(&sc->mps_mtx, MA_OWNED);
1179
1180	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1181	    "task mgmt %p timed out\n", tm);
1182	mps_reinit(sc);
1183}
1184
1185static void
1186mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1187{
1188	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1189	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1190	unsigned int cm_count = 0;
1191	struct mps_command *cm;
1192	struct mpssas_target *targ;
1193
1194	callout_stop(&tm->cm_callout);
1195
1196	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1197	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1198	targ = tm->cm_targ;
1199
1200	/*
1201	 * Currently there should be no way we can hit this case.  It only
1202	 * happens when we have a failure to allocate chain frames, and
1203	 * task management commands don't have S/G lists.
1204	 * XXXSL So should it be an assertion?
1205	 */
1206	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1207		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1208			   "This should not happen!\n", __func__, tm->cm_flags);
1209		mpssas_free_tm(sc, tm);
1210		return;
1211	}
1212
1213	if (reply == NULL) {
1214		mpssas_log_command(tm, MPS_RECOVERY,
1215		    "NULL reset reply for tm %p\n", tm);
1216		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1217			/* this completion was due to a reset, just cleanup */
1218			targ->flags &= ~MPSSAS_TARGET_INRESET;
1219			targ->tm = NULL;
1220			mpssas_free_tm(sc, tm);
1221		}
1222		else {
1223			/* we should have gotten a reply. */
1224			mps_reinit(sc);
1225		}
1226		return;
1227	}
1228
1229	mpssas_log_command(tm, MPS_RECOVERY,
1230	    "logical unit reset status 0x%x code 0x%x count %u\n",
1231	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1232	    le32toh(reply->TerminationCount));
1233
1234	/* See if there are any outstanding commands for this LUN.
1235	 * This could be made more efficient by using a per-LU data
1236	 * structure of some sort.
1237	 */
1238	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1239		if (cm->cm_lun == tm->cm_lun)
1240			cm_count++;
1241	}
1242
1243	if (cm_count == 0) {
1244		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1245		    "logical unit %u finished recovery after reset\n",
1246		    tm->cm_lun, tm);
1247
1248		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1249		    tm->cm_lun);
1250
1251		/* we've finished recovery for this logical unit.  check and
1252		 * see if some other logical unit has a timedout command
1253		 * that needs to be processed.
1254		 */
1255		cm = TAILQ_FIRST(&targ->timedout_commands);
1256		if (cm) {
1257			mpssas_send_abort(sc, tm, cm);
1258		}
1259		else {
1260			targ->tm = NULL;
1261			mpssas_free_tm(sc, tm);
1262		}
1263	}
1264	else {
1265		/* if we still have commands for this LUN, the reset
1266		 * effectively failed, regardless of the status reported.
1267		 * Escalate to a target reset.
1268		 */
1269		mpssas_log_command(tm, MPS_RECOVERY,
1270		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1271		    tm, cm_count);
1272		mpssas_send_reset(sc, tm,
1273		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1274	}
1275}
1276
/*
 * Completion handler for a TARGET RESET task management request.  If the
 * target has no outstanding commands left, recovery is finished: announce
 * AC_SENT_BDR for all LUNs and free the TM.  If commands remain, the
 * reset effectively failed and we escalate to a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* The reset was delivered (or at least attempted); the target is no
	 * longer considered mid-reset. */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1349
/* Seconds a LUN/target reset TM may run before mpssas_tm_timeout() fires. */
#define MPS_RESET_TIMEOUT 30

/*
 * Build and send a SCSI task management reset request of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated TM command
 * 'tm'.  Arms a 30-second escalation callout before mapping the request.
 * Returns 0 on success, -1 for an invalid target/type, or the error from
 * mps_map_command().
 */
static int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TMs carry no data payload and go out on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	/* NOTE(review): if mps_map_command() fails, the callout above stays
	 * armed and will eventually trigger mps_reinit() — presumably the
	 * intended fallback; confirm callers expect that. */
	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1408
1409
/*
 * Completion handler for an ABORT TASK task management request.  Three
 * outcomes: no timed-out commands remain (recovery done, free the TM);
 * the head of the timed-out queue is a different command (abort worked,
 * continue aborting the next one); or the same command is still queued
 * (abort failed, escalate to a logical unit reset).
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1491
1492#define MPS_ABORT_TIMEOUT 5
1493
1494static int
1495mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1496{
1497	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1498	struct mpssas_target *targ;
1499	int err;
1500
1501	targ = cm->cm_targ;
1502	if (targ->handle == 0) {
1503		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1504		    __func__, cm->cm_ccb->ccb_h.target_id);
1505		return -1;
1506	}
1507
1508	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1509	    "Aborting command %p\n", cm);
1510
1511	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1512	req->DevHandle = htole16(targ->handle);
1513	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1514	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1515
1516	/* XXX Need to handle invalid LUNs */
1517	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1518
1519	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1520
1521	tm->cm_data = NULL;
1522	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1523	tm->cm_complete = mpssas_abort_complete;
1524	tm->cm_complete_data = (void *)tm;
1525	tm->cm_targ = cm->cm_targ;
1526	tm->cm_lun = cm->cm_lun;
1527
1528	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1529	    mpssas_tm_timeout, tm);
1530
1531	targ->aborts++;
1532
1533	err = mps_map_command(sc, tm);
1534	if (err)
1535		mpssas_log_command(tm, MPS_RECOVERY,
1536		    "error %d sending abort for cm %p SMID %u\n",
1537		    err, cm, req->TaskMID);
1538	return err;
1539}
1540
1541
1542static void
1543mpssas_scsiio_timeout(void *data)
1544{
1545	struct mps_softc *sc;
1546	struct mps_command *cm;
1547	struct mpssas_target *targ;
1548
1549	cm = (struct mps_command *)data;
1550	sc = cm->cm_sc;
1551
1552	MPS_FUNCTRACE(sc);
1553	mtx_assert(&sc->mps_mtx, MA_OWNED);
1554
1555	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1556
1557	/*
1558	 * Run the interrupt handler to make sure it's not pending.  This
1559	 * isn't perfect because the command could have already completed
1560	 * and been re-used, though this is unlikely.
1561	 */
1562	mps_intr_locked(sc);
1563	if (cm->cm_state == MPS_CM_STATE_FREE) {
1564		mpssas_log_command(cm, MPS_XINFO,
1565		    "SCSI command %p almost timed out\n", cm);
1566		return;
1567	}
1568
1569	if (cm->cm_ccb == NULL) {
1570		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1571		return;
1572	}
1573
1574	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1575	    cm, cm->cm_ccb);
1576
1577	targ = cm->cm_targ;
1578	targ->timeouts++;
1579
1580	/* XXX first, check the firmware state, to see if it's still
1581	 * operational.  if not, do a diag reset.
1582	 */
1583
1584	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1585	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1586	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1587
1588	if (targ->tm != NULL) {
1589		/* target already in recovery, just queue up another
1590		 * timedout command to be processed later.
1591		 */
1592		mps_dprint(sc, MPS_RECOVERY,
1593		    "queued timedout cm %p for processing by tm %p\n",
1594		    cm, targ->tm);
1595	}
1596	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1597		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1598		    cm, targ->tm);
1599
1600		/* start recovery by aborting the first timedout command */
1601		mpssas_send_abort(sc, targ->tm, cm);
1602	}
1603	else {
1604		/* XXX queue this target up for recovery once a TM becomes
1605		 * available.  The firmware only has a limited number of
1606		 * HighPriority credits for the high priority requests used
1607		 * for task management, and we ran out.
1608		 *
1609		 * Isilon: don't worry about this for now, since we have
1610		 * more credits than disks in an enclosure, and limit
1611		 * ourselves to one TM per target for recovery.
1612		 */
1613		mps_dprint(sc, MPS_RECOVERY,
1614		    "timedout cm %p failed to allocate a tm\n", cm);
1615	}
1616
1617}
1618
1619static void
1620mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1621{
1622	MPI2_SCSI_IO_REQUEST *req;
1623	struct ccb_scsiio *csio;
1624	struct mps_softc *sc;
1625	struct mpssas_target *targ;
1626	struct mpssas_lun *lun;
1627	struct mps_command *cm;
1628	uint8_t i, lba_byte, *ref_tag_addr;
1629	uint16_t eedp_flags;
1630	uint32_t mpi_control;
1631
1632	sc = sassc->sc;
1633	MPS_FUNCTRACE(sc);
1634	mtx_assert(&sc->mps_mtx, MA_OWNED);
1635
1636	csio = &ccb->csio;
1637	targ = &sassc->targets[csio->ccb_h.target_id];
1638	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1639	if (targ->handle == 0x0) {
1640		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1641		    __func__, csio->ccb_h.target_id);
1642		csio->ccb_h.status = CAM_SEL_TIMEOUT;
1643		xpt_done(ccb);
1644		return;
1645	}
1646	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1647		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO supported %u\n",
1648		    __func__, csio->ccb_h.target_id);
1649		csio->ccb_h.status = CAM_TID_INVALID;
1650		xpt_done(ccb);
1651		return;
1652	}
1653	/*
1654	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1655	 * that the volume has timed out.  We want volumes to be enumerated
1656	 * until they are deleted/removed, not just failed.
1657	 */
1658	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1659		if (targ->devinfo == 0)
1660			csio->ccb_h.status = CAM_REQ_CMP;
1661		else
1662			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1663		xpt_done(ccb);
1664		return;
1665	}
1666
1667	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1668		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1669		csio->ccb_h.status = CAM_TID_INVALID;
1670		xpt_done(ccb);
1671		return;
1672	}
1673
1674	cm = mps_alloc_command(sc);
1675	if (cm == NULL) {
1676		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1677			xpt_freeze_simq(sassc->sim, 1);
1678			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1679		}
1680		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1681		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1682		xpt_done(ccb);
1683		return;
1684	}
1685
1686	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1687	bzero(req, sizeof(*req));
1688	req->DevHandle = htole16(targ->handle);
1689	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1690	req->MsgFlags = 0;
1691	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1692	req->SenseBufferLength = MPS_SENSE_LEN;
1693	req->SGLFlags = 0;
1694	req->ChainOffset = 0;
1695	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1696	req->SGLOffset1= 0;
1697	req->SGLOffset2= 0;
1698	req->SGLOffset3= 0;
1699	req->SkipCount = 0;
1700	req->DataLength = htole32(csio->dxfer_len);
1701	req->BidirectionalDataLength = 0;
1702	req->IoFlags = htole16(csio->cdb_len);
1703	req->EEDPFlags = 0;
1704
1705	/* Note: BiDirectional transfers are not supported */
1706	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1707	case CAM_DIR_IN:
1708		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1709		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1710		break;
1711	case CAM_DIR_OUT:
1712		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1713		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1714		break;
1715	case CAM_DIR_NONE:
1716	default:
1717		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1718		break;
1719	}
1720
1721  if (csio->cdb_len == 32)
1722                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1723	/*
1724	 * It looks like the hardware doesn't require an explicit tag
1725	 * number for each transaction.  SAM Task Management not supported
1726	 * at the moment.
1727	 */
1728	switch (csio->tag_action) {
1729	case MSG_HEAD_OF_Q_TAG:
1730		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1731		break;
1732	case MSG_ORDERED_Q_TAG:
1733		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1734		break;
1735	case MSG_ACA_TASK:
1736		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1737		break;
1738	case CAM_TAG_ACTION_NONE:
1739	case MSG_SIMPLE_Q_TAG:
1740	default:
1741		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1742		break;
1743	}
1744	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1745	req->Control = htole32(mpi_control);
1746	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1747		mps_free_command(sc, cm);
1748		ccb->ccb_h.status = CAM_LUN_INVALID;
1749		xpt_done(ccb);
1750		return;
1751	}
1752
1753	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1754		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1755	else
1756		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1757	req->IoFlags = htole16(csio->cdb_len);
1758
1759	/*
1760	 * Check if EEDP is supported and enabled.  If it is then check if the
1761	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1762	 * is formatted for EEDP support.  If all of this is true, set CDB up
1763	 * for EEDP transfer.
1764	 */
1765	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1766	if (sc->eedp_enabled && eedp_flags) {
1767		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1768			if (lun->lun_id == csio->ccb_h.target_lun) {
1769				break;
1770			}
1771		}
1772
1773		if ((lun != NULL) && (lun->eedp_formatted)) {
1774			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1775			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1776			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1777			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1778			req->EEDPFlags = htole16(eedp_flags);
1779
1780			/*
1781			 * If CDB less than 32, fill in Primary Ref Tag with
1782			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1783			 * already there.  Also, set protection bit.  FreeBSD
1784			 * currently does not support CDBs bigger than 16, but
1785			 * the code doesn't hurt, and will be here for the
1786			 * future.
1787			 */
1788			if (csio->cdb_len != 32) {
1789				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1790				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1791				    PrimaryReferenceTag;
1792				for (i = 0; i < 4; i++) {
1793					*ref_tag_addr =
1794					    req->CDB.CDB32[lba_byte + i];
1795					ref_tag_addr++;
1796				}
1797				req->CDB.EEDP32.PrimaryReferenceTag =
1798					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1799				req->CDB.EEDP32.PrimaryApplicationTagMask =
1800				    0xFFFF;
1801				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1802				    0x20;
1803			} else {
1804				eedp_flags |=
1805				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1806				req->EEDPFlags = htole16(eedp_flags);
1807				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1808				    0x1F) | 0x20;
1809			}
1810		}
1811	}
1812
1813	cm->cm_length = csio->dxfer_len;
1814	if (cm->cm_length != 0) {
1815		cm->cm_data = ccb;
1816		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1817	} else {
1818		cm->cm_data = NULL;
1819	}
1820	cm->cm_sge = &req->SGL;
1821	cm->cm_sglsize = (32 - 24) * 4;
1822	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1823	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1824	cm->cm_complete = mpssas_scsiio_complete;
1825	cm->cm_complete_data = ccb;
1826	cm->cm_targ = targ;
1827	cm->cm_lun = csio->ccb_h.target_lun;
1828	cm->cm_ccb = ccb;
1829
1830	/*
1831	 * If HBA is a WD and the command is not for a retry, try to build a
1832	 * direct I/O message. If failed, or the command is for a retry, send
1833	 * the I/O to the IR volume itself.
1834	 */
1835	if (sc->WD_valid_config) {
1836		if (ccb->ccb_h.status != MPS_WD_RETRY) {
1837			mpssas_direct_drive_io(sassc, cm, ccb);
1838		} else {
1839			ccb->ccb_h.status = CAM_REQ_INPROG;
1840		}
1841	}
1842
1843	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1844	   mpssas_scsiio_timeout, cm);
1845
1846	targ->issued++;
1847	targ->outstanding++;
1848	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1849
1850	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1851	    __func__, cm, ccb, targ->outstanding);
1852
1853	mps_map_command(sc, cm);
1854	return;
1855}
1856
1857static void
1858mps_response_code(struct mps_softc *sc, u8 response_code)
1859{
1860        char *desc;
1861
1862        switch (response_code) {
1863        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1864                desc = "task management request completed";
1865                break;
1866        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1867                desc = "invalid frame";
1868                break;
1869        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1870                desc = "task management request not supported";
1871                break;
1872        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1873                desc = "task management request failed";
1874                break;
1875        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1876                desc = "task management request succeeded";
1877                break;
1878        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1879                desc = "invalid lun";
1880                break;
1881        case 0xA:
1882                desc = "overlapped tag attempted";
1883                break;
1884        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1885                desc = "task queued, however not sent to target";
1886                break;
1887        default:
1888                desc = "unknown";
1889                break;
1890        }
1891		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1892                response_code, desc);
1893}
1894/**
1895 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1896 */
1897static void
1898mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1899    Mpi2SCSIIOReply_t *mpi_reply)
1900{
1901	u32 response_info;
1902	u8 *response_bytes;
1903	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1904	    MPI2_IOCSTATUS_MASK;
1905	u8 scsi_state = mpi_reply->SCSIState;
1906	u8 scsi_status = mpi_reply->SCSIStatus;
1907	char *desc_ioc_state = NULL;
1908	char *desc_scsi_status = NULL;
1909	char *desc_scsi_state = sc->tmp_string;
1910	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1911
1912	if (log_info == 0x31170000)
1913		return;
1914
1915	switch (ioc_status) {
1916	case MPI2_IOCSTATUS_SUCCESS:
1917		desc_ioc_state = "success";
1918		break;
1919	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1920		desc_ioc_state = "invalid function";
1921		break;
1922	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1923		desc_ioc_state = "scsi recovered error";
1924		break;
1925	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1926		desc_ioc_state = "scsi invalid dev handle";
1927		break;
1928	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1929		desc_ioc_state = "scsi device not there";
1930		break;
1931	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1932		desc_ioc_state = "scsi data overrun";
1933		break;
1934	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1935		desc_ioc_state = "scsi data underrun";
1936		break;
1937	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1938		desc_ioc_state = "scsi io data error";
1939		break;
1940	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1941		desc_ioc_state = "scsi protocol error";
1942		break;
1943	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1944		desc_ioc_state = "scsi task terminated";
1945		break;
1946	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1947		desc_ioc_state = "scsi residual mismatch";
1948		break;
1949	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1950		desc_ioc_state = "scsi task mgmt failed";
1951		break;
1952	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1953		desc_ioc_state = "scsi ioc terminated";
1954		break;
1955	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1956		desc_ioc_state = "scsi ext terminated";
1957		break;
1958	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1959		desc_ioc_state = "eedp guard error";
1960		break;
1961	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1962		desc_ioc_state = "eedp ref tag error";
1963		break;
1964	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1965		desc_ioc_state = "eedp app tag error";
1966		break;
1967	default:
1968		desc_ioc_state = "unknown";
1969		break;
1970	}
1971
1972	switch (scsi_status) {
1973	case MPI2_SCSI_STATUS_GOOD:
1974		desc_scsi_status = "good";
1975		break;
1976	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1977		desc_scsi_status = "check condition";
1978		break;
1979	case MPI2_SCSI_STATUS_CONDITION_MET:
1980		desc_scsi_status = "condition met";
1981		break;
1982	case MPI2_SCSI_STATUS_BUSY:
1983		desc_scsi_status = "busy";
1984		break;
1985	case MPI2_SCSI_STATUS_INTERMEDIATE:
1986		desc_scsi_status = "intermediate";
1987		break;
1988	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1989		desc_scsi_status = "intermediate condmet";
1990		break;
1991	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1992		desc_scsi_status = "reservation conflict";
1993		break;
1994	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1995		desc_scsi_status = "command terminated";
1996		break;
1997	case MPI2_SCSI_STATUS_TASK_SET_FULL:
1998		desc_scsi_status = "task set full";
1999		break;
2000	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2001		desc_scsi_status = "aca active";
2002		break;
2003	case MPI2_SCSI_STATUS_TASK_ABORTED:
2004		desc_scsi_status = "task aborted";
2005		break;
2006	default:
2007		desc_scsi_status = "unknown";
2008		break;
2009	}
2010
2011	desc_scsi_state[0] = '\0';
2012	if (!scsi_state)
2013		desc_scsi_state = " ";
2014	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2015		strcat(desc_scsi_state, "response info ");
2016	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2017		strcat(desc_scsi_state, "state terminated ");
2018	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2019		strcat(desc_scsi_state, "no status ");
2020	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2021		strcat(desc_scsi_state, "autosense failed ");
2022	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2023		strcat(desc_scsi_state, "autosense valid ");
2024
2025	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2026	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2027	/* We can add more detail about underflow data here
2028	 * TO-DO
2029	 * */
2030	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2031	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status,
2032	    scsi_status, desc_scsi_state, scsi_state);
2033
2034	if (sc->mps_debug & MPS_XINFO &&
2035		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2036		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2037		scsi_sense_print(csio);
2038		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2039	}
2040
2041	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2042		response_info = le32toh(mpi_reply->ResponseInfo);
2043		response_bytes = (u8 *)&response_info;
2044		mps_response_code(sc,response_bytes[0]);
2045	}
2046}
2047
/*
 * Completion handler for XPT_SCSI_IO commands issued via mpssas_action_scsiio().
 * Translates the MPI2 SCSI IO reply (when one is present) into CAM status,
 * performs per-target accounting and recovery-path logging, handles Direct
 * Drive (WarpDrive) retry and TLR bookkeeping, then frees the command and
 * completes the CCB with xpt_done().  Called with the softc mutex held.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command has completed; its timeout callout is no longer needed. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	/* rep is NULL when the command never reached the hardware (fast path). */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active list here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);

	/*
	 * Log completions that happen while error recovery is in progress:
	 * a command we had already timed out, a command finishing while a
	 * task management request is pending for its target, or a command
	 * swept up by a diagnostic reset.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		/* Only fill in a status if nothing set one above. */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
					   "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		/* MPS_WD_RETRY tells mpssas_action_scsiio() to skip the
		 * direct-drive translation and target the IR volume. */
		ccb->ccb_h.status = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	}

	/* Map the MPI2 IOC status of the reply onto a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Clamp the firmware's count to our sense buffer and
			 * to the space the caller provided in the CCB. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
		    T_SEQUENTIAL) && (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 are the INQUIRY allocation length. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/* Prints diagnostic detail; only emits output at MPS_XINFO level. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
			   "unfreezing SIM queue\n");
	}

	/* Freeze the device queue on any failure so CAM recovers in order. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2402
2403/* All Request reached here are Endian safe */
2404static void
2405mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2406    union ccb *ccb) {
2407	pMpi2SCSIIORequest_t	pIO_req;
2408	struct mps_softc	*sc = sassc->sc;
2409	uint64_t		virtLBA;
2410	uint32_t		physLBA, stripe_offset, stripe_unit;
2411	uint32_t		io_size, column;
2412	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2413
2414	/*
2415	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2416	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2417	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2418	 * bit different than the 10/16 CDBs, handle them separately.
2419	 */
2420	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2421	CDB = pIO_req->CDB.CDB32;
2422
2423	/*
2424	 * Handle 6 byte CDBs.
2425	 */
2426	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2427	    (CDB[0] == WRITE_6))) {
2428		/*
2429		 * Get the transfer size in blocks.
2430		 */
2431		io_size = (cm->cm_length >> sc->DD_block_exponent);
2432
2433		/*
2434		 * Get virtual LBA given in the CDB.
2435		 */
2436		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2437		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2438
2439		/*
2440		 * Check that LBA range for I/O does not exceed volume's
2441		 * MaxLBA.
2442		 */
2443		if ((virtLBA + (uint64_t)io_size - 1) <=
2444		    sc->DD_max_lba) {
2445			/*
2446			 * Check if the I/O crosses a stripe boundary.  If not,
2447			 * translate the virtual LBA to a physical LBA and set
2448			 * the DevHandle for the PhysDisk to be used.  If it
2449			 * does cross a boundry, do normal I/O.  To get the
2450			 * right DevHandle to use, get the map number for the
2451			 * column, then use that map number to look up the
2452			 * DevHandle of the PhysDisk.
2453			 */
2454			stripe_offset = (uint32_t)virtLBA &
2455			    (sc->DD_stripe_size - 1);
2456			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2457				physLBA = (uint32_t)virtLBA >>
2458				    sc->DD_stripe_exponent;
2459				stripe_unit = physLBA / sc->DD_num_phys_disks;
2460				column = physLBA % sc->DD_num_phys_disks;
2461				pIO_req->DevHandle =
2462				    htole16(sc->DD_column_map[column].dev_handle);
2463				/* ???? Is this endian safe*/
2464				cm->cm_desc.SCSIIO.DevHandle =
2465				    pIO_req->DevHandle;
2466
2467				physLBA = (stripe_unit <<
2468				    sc->DD_stripe_exponent) + stripe_offset;
2469				ptrLBA = &pIO_req->CDB.CDB32[1];
2470				physLBA_byte = (uint8_t)(physLBA >> 16);
2471				*ptrLBA = physLBA_byte;
2472				ptrLBA = &pIO_req->CDB.CDB32[2];
2473				physLBA_byte = (uint8_t)(physLBA >> 8);
2474				*ptrLBA = physLBA_byte;
2475				ptrLBA = &pIO_req->CDB.CDB32[3];
2476				physLBA_byte = (uint8_t)physLBA;
2477				*ptrLBA = physLBA_byte;
2478
2479				/*
2480				 * Set flag that Direct Drive I/O is
2481				 * being done.
2482				 */
2483				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2484			}
2485		}
2486		return;
2487	}
2488
2489	/*
2490	 * Handle 10, 12 or 16 byte CDBs.
2491	 */
2492	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2493	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2494	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2495	    (CDB[0] == WRITE_12))) {
2496		/*
2497		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2498		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2499		 * the else section.  10-byte and 12-byte CDB's are OK.
2500		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2501		 * ready to accept 12byte CDB for Direct IOs.
2502		 */
2503		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2504		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2505		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2506			/*
2507			 * Get the transfer size in blocks.
2508			 */
2509			io_size = (cm->cm_length >> sc->DD_block_exponent);
2510
2511			/*
2512			 * Get virtual LBA.  Point to correct lower 4 bytes of
2513			 * LBA in the CDB depending on command.
2514			 */
2515			lba_idx = ((CDB[0] == READ_12) ||
2516				(CDB[0] == WRITE_12) ||
2517				(CDB[0] == READ_10) ||
2518				(CDB[0] == WRITE_10))? 2 : 6;
2519			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2520			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2521			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2522			    (uint64_t)CDB[lba_idx + 3];
2523
2524			/*
2525			 * Check that LBA range for I/O does not exceed volume's
2526			 * MaxLBA.
2527			 */
2528			if ((virtLBA + (uint64_t)io_size - 1) <=
2529			    sc->DD_max_lba) {
2530				/*
2531				 * Check if the I/O crosses a stripe boundary.
2532				 * If not, translate the virtual LBA to a
2533				 * physical LBA and set the DevHandle for the
2534				 * PhysDisk to be used.  If it does cross a
2535				 * boundry, do normal I/O.  To get the right
2536				 * DevHandle to use, get the map number for the
2537				 * column, then use that map number to look up
2538				 * the DevHandle of the PhysDisk.
2539				 */
2540				stripe_offset = (uint32_t)virtLBA &
2541				    (sc->DD_stripe_size - 1);
2542				if ((stripe_offset + io_size) <=
2543				    sc->DD_stripe_size) {
2544					physLBA = (uint32_t)virtLBA >>
2545					    sc->DD_stripe_exponent;
2546					stripe_unit = physLBA /
2547					    sc->DD_num_phys_disks;
2548					column = physLBA %
2549					    sc->DD_num_phys_disks;
2550					pIO_req->DevHandle =
2551					    htole16(sc->DD_column_map[column].
2552					    dev_handle);
2553					cm->cm_desc.SCSIIO.DevHandle =
2554					    pIO_req->DevHandle;
2555
2556					physLBA = (stripe_unit <<
2557					    sc->DD_stripe_exponent) +
2558					    stripe_offset;
2559					ptrLBA =
2560					    &pIO_req->CDB.CDB32[lba_idx];
2561					physLBA_byte = (uint8_t)(physLBA >> 24);
2562					*ptrLBA = physLBA_byte;
2563					ptrLBA =
2564					    &pIO_req->CDB.CDB32[lba_idx + 1];
2565					physLBA_byte = (uint8_t)(physLBA >> 16);
2566					*ptrLBA = physLBA_byte;
2567					ptrLBA =
2568					    &pIO_req->CDB.CDB32[lba_idx + 2];
2569					physLBA_byte = (uint8_t)(physLBA >> 8);
2570					*ptrLBA = physLBA_byte;
2571					ptrLBA =
2572					    &pIO_req->CDB.CDB32[lba_idx + 3];
2573					physLBA_byte = (uint8_t)physLBA;
2574					*ptrLBA = physLBA_byte;
2575
2576					/*
2577					 * Set flag that Direct Drive I/O is
2578					 * being done.
2579					 */
2580					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2581				}
2582			}
2583		} else {
2584			/*
2585			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2586			 * 0.  Get the transfer size in blocks.
2587			 */
2588			io_size = (cm->cm_length >> sc->DD_block_exponent);
2589
2590			/*
2591			 * Get virtual LBA.
2592			 */
2593			virtLBA = ((uint64_t)CDB[2] << 54) |
2594			    ((uint64_t)CDB[3] << 48) |
2595			    ((uint64_t)CDB[4] << 40) |
2596			    ((uint64_t)CDB[5] << 32) |
2597			    ((uint64_t)CDB[6] << 24) |
2598			    ((uint64_t)CDB[7] << 16) |
2599			    ((uint64_t)CDB[8] << 8) |
2600			    (uint64_t)CDB[9];
2601
2602			/*
2603			 * Check that LBA range for I/O does not exceed volume's
2604			 * MaxLBA.
2605			 */
2606			if ((virtLBA + (uint64_t)io_size - 1) <=
2607			    sc->DD_max_lba) {
2608				/*
2609				 * Check if the I/O crosses a stripe boundary.
2610				 * If not, translate the virtual LBA to a
2611				 * physical LBA and set the DevHandle for the
2612				 * PhysDisk to be used.  If it does cross a
2613				 * boundry, do normal I/O.  To get the right
2614				 * DevHandle to use, get the map number for the
2615				 * column, then use that map number to look up
2616				 * the DevHandle of the PhysDisk.
2617				 */
2618				stripe_offset = (uint32_t)virtLBA &
2619				    (sc->DD_stripe_size - 1);
2620				if ((stripe_offset + io_size) <=
2621				    sc->DD_stripe_size) {
2622					physLBA = (uint32_t)(virtLBA >>
2623					    sc->DD_stripe_exponent);
2624					stripe_unit = physLBA /
2625					    sc->DD_num_phys_disks;
2626					column = physLBA %
2627					    sc->DD_num_phys_disks;
2628					pIO_req->DevHandle =
2629					    htole16(sc->DD_column_map[column].
2630					    dev_handle);
2631					cm->cm_desc.SCSIIO.DevHandle =
2632					    pIO_req->DevHandle;
2633
2634					physLBA = (stripe_unit <<
2635					    sc->DD_stripe_exponent) +
2636					    stripe_offset;
2637
2638					/*
2639					 * Set upper 4 bytes of LBA to 0.  We
2640					 * assume that the phys disks are less
2641					 * than 2 TB's in size.  Then, set the
2642					 * lower 4 bytes.
2643					 */
2644					pIO_req->CDB.CDB32[2] = 0;
2645					pIO_req->CDB.CDB32[3] = 0;
2646					pIO_req->CDB.CDB32[4] = 0;
2647					pIO_req->CDB.CDB32[5] = 0;
2648					ptrLBA = &pIO_req->CDB.CDB32[6];
2649					physLBA_byte = (uint8_t)(physLBA >> 24);
2650					*ptrLBA = physLBA_byte;
2651					ptrLBA = &pIO_req->CDB.CDB32[7];
2652					physLBA_byte = (uint8_t)(physLBA >> 16);
2653					*ptrLBA = physLBA_byte;
2654					ptrLBA = &pIO_req->CDB.CDB32[8];
2655					physLBA_byte = (uint8_t)(physLBA >> 8);
2656					*ptrLBA = physLBA_byte;
2657					ptrLBA = &pIO_req->CDB.CDB32[9];
2658					physLBA_byte = (uint8_t)physLBA;
2659					*ptrLBA = physLBA_byte;
2660
2661					/*
2662					 * Set flag that Direct Drive I/O is
2663					 * being done.
2664					 */
2665					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2666				}
2667			}
2668		}
2669	}
2670}
2671
2672#if __FreeBSD_version >= 900026
2673static void
2674mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2675{
2676	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2677	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2678	uint64_t sasaddr;
2679	union ccb *ccb;
2680
2681	ccb = cm->cm_complete_data;
2682
2683	/*
2684	 * Currently there should be no way we can hit this case.  It only
2685	 * happens when we have a failure to allocate chain frames, and SMP
2686	 * commands require two S/G elements only.  That should be handled
2687	 * in the standard request size.
2688	 */
2689	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2690		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2691			   __func__, cm->cm_flags);
2692		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2693		goto bailout;
2694        }
2695
2696	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2697	if (rpl == NULL) {
2698		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2699		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2700		goto bailout;
2701	}
2702
2703	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2704	sasaddr = le32toh(req->SASAddress.Low);
2705	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2706
2707	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2708	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2709		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2710		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2711		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2712		goto bailout;
2713	}
2714
2715	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2716		   "%#jx completed successfully\n", __func__,
2717		   (uintmax_t)sasaddr);
2718
2719	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2720		ccb->ccb_h.status = CAM_REQ_CMP;
2721	else
2722		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2723
2724bailout:
2725	/*
2726	 * We sync in both directions because we had DMAs in the S/G list
2727	 * in both directions.
2728	 */
2729	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2730			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2731	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2732	mps_free_command(sc, cm);
2733	xpt_done(ccb);
2734}
2735
/*
 * Build and dispatch an SMP passthrough request to the device at 'sasaddr'.
 * The request and response buffers come from the CCB (virtual addresses or
 * single-entry S/G lists only); both are mapped in one busdma operation via
 * a two-element uio.  Completion is handled by mpssas_smpio_complete().
 * On any setup failure the CCB is completed immediately with an error.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* NOTE(review): assumes the single S/G entry holds a
			 * kernel virtual address (CAM_DATA_SG semantics). */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	/* Initialize the command's S/G pointer to the frame's SGL area. */
	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2908
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * to talk to and hand the request off to mpssas_send_smpcmd().  If the
 * addressed device is not itself an SMP target, the request is routed to
 * the expander it is attached to (via the parent handle/SAS address).
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/*
		 * Old probe code: look the parent up by handle and verify
		 * it is an SMP target before using its SAS address.
		 */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * Current probe code caches the parent's devinfo and SAS
		 * address on the target itself; use those directly.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	/* Still no address: nothing we can route this request to. */
	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3042#endif //__FreeBSD_version >= 900026
3043
3044static void
3045mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3046{
3047	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3048	struct mps_softc *sc;
3049	struct mps_command *tm;
3050	struct mpssas_target *targ;
3051
3052	MPS_FUNCTRACE(sassc->sc);
3053	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3054
3055	sc = sassc->sc;
3056	tm = mps_alloc_command(sc);
3057	if (tm == NULL) {
3058		mps_dprint(sc, MPS_ERROR,
3059		    "comand alloc failure in mpssas_action_resetdev\n");
3060		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3061		xpt_done(ccb);
3062		return;
3063	}
3064
3065	targ = &sassc->targets[ccb->ccb_h.target_id];
3066	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3067	req->DevHandle = htole16(targ->handle);
3068	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3069	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3070
3071	/* SAS Hard Link Reset / SATA Link Reset */
3072	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3073
3074	tm->cm_data = NULL;
3075	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3076	tm->cm_complete = mpssas_resetdev_complete;
3077	tm->cm_complete_data = ccb;
3078	tm->cm_targ = targ;
3079	mps_map_command(sc, tm);
3080}
3081
/*
 * Completion handler for the target reset issued by
 * mpssas_action_resetdev().  Translates the task management reply into a
 * CCB status, announces the bus device reset to CAM on success, then
 * frees the task management command and completes the CCB.
 *
 * NOTE(review): assumes tm->cm_reply is non-NULL whenever no command
 * error flag is set — confirm against the firmware completion paths.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode is byte-swapped with le32toh here —
	 * verify the field width in the MPI2 task management reply
	 * structure; if it is narrower than 32 bits this swap is wrong on
	 * big-endian hosts.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		/* Let CAM peripherals know a bus device reset was sent. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3129
3130static void
3131mpssas_poll(struct cam_sim *sim)
3132{
3133	struct mpssas_softc *sassc;
3134
3135	sassc = cam_sim_softc(sim);
3136
3137	if (sassc->sc->mps_debug & MPS_TRACE) {
3138		/* frequent debug messages during a panic just slow
3139		 * everything down too much.
3140		 */
3141		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3142		sassc->sc->mps_debug &= ~MPS_TRACE;
3143	}
3144
3145	mps_intr_locked(sassc->sc);
3146}
3147
/*
 * CAM async event callback.  Depending on the FreeBSD/CAM version this
 * driver was built against, EEDP (protection information) state for a LUN
 * is refreshed either from AC_ADVINFO_CHANGED read-capacity updates or by
 * issuing our own READ CAPACITY(16) on AC_FOUND_DEVICE.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this path refers to in the target's list. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create a LUN record to hold EEDP state. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data from the
		 * transport layer via an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection. */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older CAM: probe EEDP support with our own READ CAP 16. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3244
3245#if (__FreeBSD_version < 901503) || \
3246    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3247static void
3248mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3249		  struct ccb_getdev *cgd)
3250{
3251	struct mpssas_softc *sassc = sc->sassc;
3252	struct ccb_scsiio *csio;
3253	struct scsi_read_capacity_16 *scsi_cmd;
3254	struct scsi_read_capacity_eedp *rcap_buf;
3255	path_id_t pathid;
3256	target_id_t targetid;
3257	lun_id_t lunid;
3258	union ccb *ccb;
3259	struct cam_path *local_path;
3260	struct mpssas_target *target;
3261	struct mpssas_lun *lun;
3262	uint8_t	found_lun;
3263	char path_str[64];
3264
3265	sassc = sc->sassc;
3266	pathid = cam_sim_path(sassc->sim);
3267	targetid = xpt_path_target_id(path);
3268	lunid = xpt_path_lun_id(path);
3269
3270	target = &sassc->targets[targetid];
3271	if (target->handle == 0x0)
3272		return;
3273
3274	/*
3275	 * Determine if the device is EEDP capable.
3276	 *
3277	 * If this flag is set in the inquiry data,
3278	 * the device supports protection information,
3279	 * and must support the 16 byte read
3280	 * capacity command, otherwise continue without
3281	 * sending read cap 16
3282	 */
3283	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3284		return;
3285
3286	/*
3287	 * Issue a READ CAPACITY 16 command.  This info
3288	 * is used to determine if the LUN is formatted
3289	 * for EEDP support.
3290	 */
3291	ccb = xpt_alloc_ccb_nowait();
3292	if (ccb == NULL) {
3293		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3294		    "for EEDP support.\n");
3295		return;
3296	}
3297
3298	if (xpt_create_path(&local_path, xpt_periph,
3299	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3300		mps_dprint(sc, MPS_ERROR, "Unable to create "
3301		    "path for EEDP support\n");
3302		xpt_free_ccb(ccb);
3303		return;
3304	}
3305
3306	/*
3307	 * If LUN is already in list, don't create a new
3308	 * one.
3309	 */
3310	found_lun = FALSE;
3311	SLIST_FOREACH(lun, &target->luns, lun_link) {
3312		if (lun->lun_id == lunid) {
3313			found_lun = TRUE;
3314			break;
3315		}
3316	}
3317	if (!found_lun) {
3318		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3319		    M_NOWAIT | M_ZERO);
3320		if (lun == NULL) {
3321			mps_dprint(sc, MPS_ERROR,
3322			    "Unable to alloc LUN for EEDP support.\n");
3323			xpt_free_path(local_path);
3324			xpt_free_ccb(ccb);
3325			return;
3326		}
3327		lun->lun_id = lunid;
3328		SLIST_INSERT_HEAD(&target->luns, lun,
3329		    lun_link);
3330	}
3331
3332	xpt_path_string(local_path, path_str, sizeof(path_str));
3333	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3334	    path_str, target->handle);
3335
3336	/*
3337	 * Issue a READ CAPACITY 16 command for the LUN.
3338	 * The mpssas_read_cap_done function will load
3339	 * the read cap info into the LUN struct.
3340	 */
3341	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3342	    M_MPT2, M_NOWAIT | M_ZERO);
3343	if (rcap_buf == NULL) {
3344		mps_dprint(sc, MPS_FAULT,
3345		    "Unable to alloc read capacity buffer for EEDP support.\n");
3346		xpt_free_path(ccb->ccb_h.path);
3347		xpt_free_ccb(ccb);
3348		return;
3349	}
3350	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3351	csio = &ccb->csio;
3352	csio->ccb_h.func_code = XPT_SCSI_IO;
3353	csio->ccb_h.flags = CAM_DIR_IN;
3354	csio->ccb_h.retry_count = 4;
3355	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3356	csio->ccb_h.timeout = 60000;
3357	csio->data_ptr = (uint8_t *)rcap_buf;
3358	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3359	csio->sense_len = MPS_SENSE_LEN;
3360	csio->cdb_len = sizeof(*scsi_cmd);
3361	csio->tag_action = MSG_SIMPLE_Q_TAG;
3362
3363	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3364	bzero(scsi_cmd, sizeof(*scsi_cmd));
3365	scsi_cmd->opcode = 0x9E;
3366	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3367	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3368
3369	ccb->ccb_h.ppriv_ptr1 = sassc;
3370	xpt_action(ccb);
3371}
3372
/*
 * Completion handler for the READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Looks up the LUN on the completed path and marks
 * it EEDP-formatted (recording the block size) if the command succeeded
 * and the protect bit is set; otherwise marks it not formatted.  Frees
 * the data buffer, path, and CCB allocated by mpssas_check_eedp().
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself because this SCSI
	 * command was generated internally by the driver.
	 * Currently there is a single place where the driver issues a SCSI
	 * command internally.  If more internal commands are added in the
	 * future, they also need to release the devq themselves, since such
	 * commands do not go back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit: the LUN was formatted with protection. */
		if (rcap_buf->protect & 0x01) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3436#endif /* (__FreeBSD_version < 901503) || \
3437          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3438
3439int
3440mpssas_startup(struct mps_softc *sc)
3441{
3442	struct mpssas_softc *sassc;
3443
3444	/*
3445	 * Send the port enable message and set the wait_for_port_enable flag.
3446	 * This flag helps to keep the simq frozen until all discovery events
3447	 * are processed.
3448	 */
3449	sassc = sc->sassc;
3450	mpssas_startup_increment(sassc);
3451	sc->wait_for_port_enable = 1;
3452	mpssas_send_portenable(sc);
3453	return (0);
3454}
3455
3456static int
3457mpssas_send_portenable(struct mps_softc *sc)
3458{
3459	MPI2_PORT_ENABLE_REQUEST *request;
3460	struct mps_command *cm;
3461
3462	MPS_FUNCTRACE(sc);
3463
3464	if ((cm = mps_alloc_command(sc)) == NULL)
3465		return (EBUSY);
3466	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3467	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3468	request->MsgFlags = 0;
3469	request->VP_ID = 0;
3470	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3471	cm->cm_complete = mpssas_portenable_complete;
3472	cm->cm_data = NULL;
3473	cm->cm_sge = NULL;
3474
3475	mps_map_command(sc, cm);
3476	mps_dprint(sc, MPS_XINFO,
3477	    "mps_send_portenable finished cm %p req %p complete %p\n",
3478	    cm, cm->cm_req, cm->cm_complete);
3479	return (0);
3480}
3481
3482static void
3483mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3484{
3485	MPI2_PORT_ENABLE_REPLY *reply;
3486	struct mpssas_softc *sassc;
3487
3488	MPS_FUNCTRACE(sc);
3489	sassc = sc->sassc;
3490
3491	/*
3492	 * Currently there should be no way we can hit this case.  It only
3493	 * happens when we have a failure to allocate chain frames, and
3494	 * port enable commands don't have S/G lists.
3495	 */
3496	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3497		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3498			   "This should not happen!\n", __func__, cm->cm_flags);
3499	}
3500
3501	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3502	if (reply == NULL)
3503		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3504	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3505	    MPI2_IOCSTATUS_SUCCESS)
3506		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3507
3508	mps_free_command(sc, cm);
3509	if (sc->mps_ich.ich_arg != NULL) {
3510		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3511		config_intrhook_disestablish(&sc->mps_ich);
3512		sc->mps_ich.ich_arg = NULL;
3513	}
3514
3515	/*
3516	 * Get WarpDrive info after discovery is complete but before the scan
3517	 * starts.  At this point, all devices are ready to be exposed to the
3518	 * OS.  If devices should be hidden instead, take them out of the
3519	 * 'targets' array before the scan.  The devinfo for a disk will have
3520	 * some info and a volume's will be 0.  Use that to remove disks.
3521	 */
3522	mps_wd_config_pages(sc);
3523
3524	/*
3525	 * Done waiting for port enable to complete.  Decrement the refcount.
3526	 * If refcount is 0, discovery is complete and a rescan of the bus can
3527	 * take place.  Since the simq was explicitly frozen before port
3528	 * enable, it must be explicitly released here to keep the
3529	 * freeze/release count in sync.
3530	 */
3531	sc->wait_for_port_enable = 0;
3532	sc->port_enable_complete = 1;
3533	wakeup(&sc->port_enable_complete);
3534	mpssas_startup_decrement(sassc);
3535	xpt_release_simq(sassc->sim, 1);
3536}
3537
3538