/*
 * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
 * (i.e. Thunderbolt and Invader) controllers
 *
 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *		Swaminathan K S
 *		Arun Chandrashekhar
 *		Manju R
 *		Rasheed
 *		Shakeel Bukhari
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
 */


#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>


/* Pre-TB command size and TB command size. */
#define	MR_COMMAND_SIZE (64*20)	/* 1280 bytes */
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int	debug_level_g;
extern unsigned int	enable_fp;
volatile int dump_io_wait_time = 900;
extern volatile int  debug_timeout_g;
extern int	mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void	push_pending_mfi_pkt(struct mrsas_instance *,
			struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
	    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);

/* Local static prototypes. */
static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
    struct scsi_address *, struct scsi_pkt *, uchar_t *);
static void mrsas_tbolt_set_pd_lba(U8 *, size_t, uint8_t *, U64, U32);
static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
    struct mrsas_tbolt_pd_info *, int);

static int mrsas_debug_tbolt_fw_faults_after_ocr = 0;

/*
 * destroy_mfi_mpi_frame_pool
 */
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
{
	int	i;

	struct mrsas_cmd	*cmd;

	/* return all mfi frames to pool */
	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);
		}
		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}
}

/*
 * destroy_mpi2_frame_pool
 */
void
destroy_mpi2_frame_pool(struct mrsas_instance *instance)
{

	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mpi2_frame_pool_dma_obj);
		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
	}
}


/*
 * mrsas_tbolt_free_additional_dma_buffer
 */
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
	int i;

	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}
	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}

	for (i = 0; i < 2; i++) {
		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    instance->ld_map_obj[i]);
			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
		}
	}
}


/*
 * free_req_rep_desc_pool
 */
void
free_req_rep_desc_pool(struct mrsas_instance *instance)
{
	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->request_desc_dma_obj);
		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}
}


/*
 * ThunderBolt(TB) Request Message Frame Pool
 */
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	uint16_t	max_cmd;
	uint32_t	sgl_sz;
	uint32_t	raid_msg_size;
	uint32_t	total_size;
	uint32_t	offset;
	uint32_t	io_req_base_phys;
	uint8_t		*io_req_base;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sgl_sz		= 1024;
	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;

	/* Allocate an additional 256 bytes to accommodate SMID 0. */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);

	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * ThunderBolt(TB): create a single chunk of DMA'ble memory and
	 * then split it among the commands.  Each command gets a RAID
	 * MESSAGE FRAME, which can embed an MFI_FRAME within it.  See
	 * alloc_req_rep_desc(), where the request/reply descriptor
	 * queues are allocated, for comparison.
	 */
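
	/*
	 * Resulting pool layout (one contiguous DMA chunk); the offsets
	 * follow directly from the pointer arithmetic below:
	 *
	 *   io_request_frames: one message frame reserved for SMID 0
	 *   io_req_base:       max_cmd RAID message frames
	 *                      max_cmd chained-SGL buffers (sgl_sz each)
	 *                      max_cmd sense buffers (SENSE_LENGTH each)
	 */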

	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
	    (void *)instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->SMID = i + 1;

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
		    cmd->index, (void *)cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
		    cmd->index, (void *)cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);
}


/*
 * alloc_additional_dma_buffer for AEN
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	internal_buf_size = PAGESIZE * 2;
	int i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

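	/*
	 * Size of the firmware RAID map: MR_FW_RAID_MAP embeds one
	 * MR_LD_SPAN_MAP entry, so add space for the remaining
	 * (MAX_LOGICAL_DRIVES - 1) entries.
	 */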
	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			dev_err(instance->dip, CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)instance->
		    ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}

MRSAS_REQUEST_DESCRIPTOR_UNION *
mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	if (index > instance->max_fw_cmds) {
		con_log(CL_ANN1, (CE_NOTE,
		    "Invalid SMID 0x%x request for descriptor", index));
		con_log(CL_ANN1, (CE_NOTE,
		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
		return (NULL);
	}

	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    ((char *)instance->request_message_pool +
	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor : 0x%08lx", (unsigned long)req_desc));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor base phy : 0x%08lx",
	    (unsigned long)instance->request_message_pool_phy));

	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}


/*
 * Allocate Request and Reply Queue Descriptors.
 */
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t	request_q_sz, reply_q_sz;
	int		i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	/*
	 * ThunderBolt(TB): there is no longer a producer/consumer
	 * mechanism.  On an interrupt the driver scans through the list
	 * of reply descriptors and processes them accordingly, so memory
	 * for all of the reply descriptors is allocated up front.
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be a multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15) / 16) * 16;

	reply_q_sz = 8 * max_reply_q_sz;
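
	/*
	 * Sizing note: each MPI2 reply descriptor is 8 bytes (a single
	 * 64-bit Words field, as the initialization loop below shows),
	 * and the depth is max_fw_cmds + 1 (one more than the commands
	 * the driver may send, see mrsas_init_adapter_tbolt()) rounded
	 * up to a multiple of 16.
	 */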

	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
	    (void *)instance->reply_frame_pool));

	/* initialize reply descriptor words to all ones (invalid) */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}

	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));

	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));

	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	request_q_sz = 8 * (instance->max_fw_cmds);

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr	= mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen	= 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy =
	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}

/*
 * mrsas_alloc_cmd_pool_tbolt
 *
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
 * single routine
 */
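/*
 * Command-pool layout (reflected in the loops below): cmd_list[] holds
 * max_fw_cmds command pointers.  Slot 0 is reserved for IOC INIT, slots
 * 1 .. MRSAS_APP_RESERVED_CMDS-1 feed the application (pass-through)
 * pool, and the remainder form the regular I/O pool.
 */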
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
	int		i;
	int		count;
	uint32_t	max_cmd;
	uint32_t	reserve_cmd;
	size_t		sz;

	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;
	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool_tbolt: "
	    "max_cmd %x", max_cmd));

	sz = sizeof (struct mrsas_cmd *) * max_cmd;

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);

	/* create a frame pool and assign one frame to each cmd */
	for (count = 0; count < max_cmd; count++) {
		instance->cmd_list[count] =
		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
	}

	/* add all the commands to the command pool */

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	INIT_LIST_HEAD(&instance->cmd_app_pool_list);

	reserve_cmd = MRSAS_APP_RESERVED_CMDS;

	/* cmd index 0 is reserved for IOC INIT */
	for (i = 1; i < reserve_cmd; i++) {
		cmd		= instance->cmd_list[i];
		cmd->index	= i;
		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
	}

	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd		= instance->cmd_list[i];
		cmd->index	= i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	return (DDI_SUCCESS);

	/* Unwind path; unused while the allocations above use KM_SLEEP. */
mrsas_undo_cmds:
	if (count > 0) {
		/* free each cmd */
		for (i = 0; i < count; i++) {
			if (instance->cmd_list[i] != NULL) {
				kmem_free(instance->cmd_list[i],
				    sizeof (struct mrsas_cmd));
			}
			instance->cmd_list[i] = NULL;
		}
	}

mrsas_undo_cmd_list:
	if (instance->cmd_list != NULL)
		kmem_free(instance->cmd_list, sz);
	instance->cmd_list = NULL;

	return (DDI_FAILURE);
}


/*
 * free_space_for_mpi2
 */
void
free_space_for_mpi2(struct mrsas_instance *instance)
{
	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	/* First free the additional DMA buffer */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	/* Free the request/reply descriptor pool */
	free_req_rep_desc_pool(instance);

	/* Free the MPI message pool */
	destroy_mpi2_frame_pool(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list and the cmd_list itself */
	mrsas_free_cmd_pool(instance);
}


/*
 * ThunderBolt(TB) memory allocations for commands/messages/frames.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION))) / sizeof (MPI2_SGE_IO_UNION);
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/*
	 * Reduce the SG count by 2: one entry is consumed by the chain
	 * element and one is reserved for the group cmds feature in FW.
	 */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
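
	/*
	 * The chain offsets below are expressed in 16-byte units (hence
	 * the division by 16): the offset of the SGL within the MPT
	 * message, and the offset at which the chain element sits in the
	 * I/O request frame, i.e. the last SGE slot of the main frame.
	 */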
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    sizeof (MPI2_SGE_IO_UNION)) / 16;
	instance->reply_read_index = 0;

	/* Allocate Request and Reply descriptor Arrays */
	/* Make sure the buffer is aligned to 8 for the req/rep descriptor pool */
	if (alloc_req_rep_desc(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error allocating memory for descriptor pool");
		goto mpi2_undo_cmd_pool;
	}
	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
	    instance->request_message_pool_phy));

	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
	if (create_mfi_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error allocating memory for MFI frame pool");
		goto mpi2_undo_descriptor_pool;
	}

	/*
	 * Allocate MPI2 Message pool.
	 * Make sure the buffer is aligned to 256 for the raid message
	 * packet; create an io request pool and assign one frame to
	 * each cmd.
	 */
	if (create_mpi2_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error allocating memory for MPI2 message pool");
		goto mpi2_undo_mfi_frame_pool;
	}

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
	    instance->max_sge_in_main_msg));
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
	    instance->max_sge_in_chain));
	con_log(CL_ANN1, (CE_CONT,
	    "[max_sge]0x%x", instance->max_num_sge));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
	    instance->chain_offset_mpt_msg));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
	    instance->chain_offset_io_req));
#endif

	/* Allocate additional dma buffer */
	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error allocating tbolt additional DMA buffer");
		goto mpi2_undo_message_pool;
	}

	return (DDI_SUCCESS);

mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descriptor_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}


/*
 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 */
int
mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */

	if (instance->max_fw_cmds > 1008) {
		instance->max_fw_cmds = 1008;
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
	}

	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));

	/* create a pool of commands */
	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "alloc_space_for_mpi2() failed.");

		return (DDI_FAILURE);
	}

	/* Send ioc init message */
	/* NOTE: the issue_init call does FMA checking already. */
	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_issue_init_mpi2() failed.");

		goto fail_init_fusion;
	}

	instance->unroll.alloc_space_mpi2 = 1;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_fusion:
	free_space_for_mpi2(instance);

	return (DDI_FAILURE);
}


/*
 * init_mpi2
 */
int
mrsas_issue_init_mpi2(struct mrsas_instance *instance)
{
	dma_obj_t init2_dma_obj;
	int ret_val = DDI_SUCCESS;

	/* allocate DMA buffer for IOC INIT message */
	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
	init2_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "mrsas_issue_init_mpi2: "
		    "could not allocate data transfer buffer.");
		return (DDI_FAILURE);
	}
	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mpi2 _phys adr: %x",
	    init2_dma_obj.dma_cookie[0].dmac_address));

	/* Initialize and send ioc init message */
	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
	if (ret_val == DDI_FAILURE) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Failed"));
		goto fail_init_mpi2;
	}

	/* free IOC init DMA buffer */
	if (mrsas_free_dma_obj(instance, init2_dma_obj)
	    != DDI_SUCCESS) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Free Failed"));
		return (DDI_FAILURE);
	}

	/* Get/Check and sync ld_map info */
	instance->map_id = 0;
	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
		(void) mrsas_tbolt_sync_map_info(instance);

	/* No mrsas_cmd to send, so send NULL. */
	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
		goto fail_init_mpi2;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_issue_init_mpi2: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_mpi2:
	(void) mrsas_free_dma_obj(instance, init2_dma_obj);

	return (DDI_FAILURE);
}

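/*
 * mrsas_tbolt_ioc_init: hand the IOC INIT request to the firmware.
 * The MPI2 IOC_INIT payload lives in the caller-supplied DMA buffer; its
 * physical address is wrapped in an MFI init frame (command slot 0),
 * which is in turn posted through an MFA request descriptor.  Completion
 * is detected by polling the MFI frame's cmd_status field.
 */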
static int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
	int				numbytes;
	uint16_t			flags;
	struct mrsas_init_frame2	*mfiFrameInit2;
	struct mrsas_header		*frame_hdr;
	Mpi2IOCInitRequest_t		*init;
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_drv_ver		drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;
	uint32_t			timeout;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    (int)sizeof (Mpi2IOCInitRequest_t)));
#endif

	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	    instance->raid_io_msg_size / 4);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
	    0);

	ddi_put16(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueDepth,
	    instance->reply_q_depth);
	/*
	 * These addresses are set using the DMA cookie addresses from when the
	 * memory was allocated.  Sense buffer hi address should be 0.
	 */
	ddi_put32(mpi2_dma_obj->acc_handle,
	    &init->SenseBufferAddressHigh, 0);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);

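	/*
	 * Command slot 0 is reserved for IOC INIT (see
	 * mrsas_alloc_cmd_pool_tbolt()); it carries the MFI init frame.
	 */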
	cmd = instance->cmd_list[0];
	if (cmd == NULL) {
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	cmd->pkt = NULL;
	cmd->drv_pkt_time = 0;

	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Init the MFI Header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    (int)init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	req_desc.Words = cmd->scsi_io_request_phys_addr;
	req_desc.MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = &req_desc;

	/* issue the init frame */

	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);

	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
	    frame_hdr->cmd_status));

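	/*
	 * Poll for completion: firmware overwrites cmd_status (set to
	 * POLL_MODE above) once the INIT frame has been processed.  The
	 * timeout is in clock ticks, decremented once per delay(1) tick.
	 */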
	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
	do {
		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
			break;
		delay(1);
		timeout--;
	} while (timeout > 0);

	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_ioc_init:

	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}

int
wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
	int i;
	uint32_t wait_time = dump_io_wait_time;

	for (i = 0; i < wait_time; i++) {
		/*
		 * Check for outstanding poll-mode commands; the LD sync
		 * and AEN commands are always outstanding, hence the
		 * threshold of 2.
		 */
		if (instance->fw_outstanding <= 2) {
			break;
		}
		drv_usecwait(MILLISEC);
		/* complete commands from reply queue */
		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
	}
	if (instance->fw_outstanding > 2) {
		return (1);
	}
	return (0);
}

/*
 * scsi_pkt handling
 *
 * Visible to the external world via the transport structure.
 */

int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	struct mrsas_cmd	*cmd = NULL;
	uchar_t			cmd_done = 0;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_tran_start: returning TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	(void) mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command was already completed by the
	 * mrsas_tbolt_build_cmd() routine, in which case cmd_done is
	 * set, cmd is NULL, and an appropriate reason is provided in
	 * the pkt_reason field.
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			dev_err(instance->dip, CE_WARN,
			    "Command Queue Full... Returning BUSY");
			DTRACE_PROBE2(tbolt_start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);
	} else {
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(tbolt_start_nointr_done,
		    uint8_t, cmd->frame->hdr.cmd,
		    uint8_t, cmd->frame->hdr.cmd_status);
	}

	return (TRAN_ACCEPT);
}

/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 */
static int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(acmd);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * zero status byte.
	 */
	*(pkt->pkt_scbp) = 0;

	return (0);
}


int
mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
    struct scsa_cmd *acmd,
    struct mrsas_cmd *cmd,
    Mpi2RaidSCSIIORequest_t *scsi_raid_io,
    uint32_t *datalen)
{
	uint32_t		MaxSGEs;
	int			sg_to_process;
	uint32_t		i, j;
	uint32_t		numElements, endElement;
	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	con_log(CL_ANN1, (CE_NOTE,
	    "chkpnt: Building Chained SGL :%d", __LINE__));

	/* Calculate SGE size in number of Words (32-bit) */
	/* Clear the datalen before updating it. */
	*datalen = 0;

	MaxSGEs = instance->max_sge_in_main_msg;

	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	/* set data transfer flag. */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_WRITE);
	} else {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_READ);
	}

	numElements = acmd->cmd_cookiecnt;

	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));

	if (numElements > instance->max_num_sge) {
		con_log(CL_ANN, (CE_NOTE,
		    "[Max SGE Count Exceeded]:%x", numElements));
		return (numElements);
	}

	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
	    (uint8_t)numElements);

	/* set end element in main message frame */
	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
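
	/*
	 * If all DMA cookies fit in the main frame they are placed there
	 * directly; otherwise the last main-frame slot is reserved for
	 * the chain element, which points at the per-command chained SGL
	 * buffer (cmd->sgl) holding the remaining entries.
	 */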

	/* prepare the scatter-gather list for the firmware */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	if (instance->gen3) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;

		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
		    acmd->cmd_dmacookies[i].dmac_laddress);

		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
		    acmd->cmd_dmacookies[i].dmac_size);

		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

		if (instance->gen3) {
			if (i == (numElements - 1)) {
				ddi_put8(acc_handle,
				    &scsi_raid_io_sgl_ieee->Flags,
				    IEEE_SGE_FLAGS_END_OF_LIST);
			}
		}

		*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
		    scsi_raid_io_sgl_ieee->Address));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
		    scsi_raid_io_sgl_ieee->Length));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
		    scsi_raid_io_sgl_ieee->Flags));
#endif
	}

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);

	/* check if chained SGL required */
	if (i < numElements) {

		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));

		if (instance->gen3) {
			uint16_t ioFlags =
			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);

			if ((ioFlags &
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
				    (U8)instance->chain_offset_io_req);
			} else {
				ddi_put8(acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
			}
		} else {
			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
			    (U8)instance->chain_offset_io_req);
		}

		/* prepare physical chain element */
		ieeeChainElement = scsi_raid_io_sgl_ieee;

		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);

		if (instance->gen3) {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
		} else {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
		}

		ddi_put32(acc_handle, &ieeeChainElement->Length,
		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));

		ddi_put64(acc_handle, &ieeeChainElement->Address,
		    (U64)cmd->sgl_phys_addr);

		sg_to_process = numElements - i;

		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", sg_to_process));

		/* point to the chained SGL buffer */
		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;

		/* build rest of the SGL in chained buffer */
		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));

			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
			    acmd->cmd_dmacookies[i].dmac_laddress);

			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
			    acmd->cmd_dmacookies[i].dmac_size);

			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

			if (instance->gen3) {
				if (i == (numElements - 1)) {
					ddi_put8(acc_handle,
					    &scsi_raid_io_sgl_ieee->Flags,
					    IEEE_SGE_FLAGS_END_OF_LIST);
				}
			}

			*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Address]: %" PRIx64,
			    scsi_raid_io_sgl_ieee->Address));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
#endif

			i++;
		}
	}

	return (0);
} /* end of BuildScatterGather */


/*
 * build_cmd
 */
static struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint8_t		fp_possible = 0;
	uint32_t	index;
	uint32_t	lba_count = 0;
	uint32_t	start_lba_hi = 0;
	uint32_t	start_lba_lo = 0;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;
	struct mrsas_cmd		*cmd = NULL;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	uint32_t			datalen;
	struct IO_REQUEST_INFO io_info;
	MR_FW_RAID_MAP_ALL *local_map_ptr;
	uint16_t pd_cmd_cdblen;

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));

	/* find out if this is a logical or physical drive command.  */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);

	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_raid_msg_pkt(instance))) {
		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	index = cmd->index;
	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
	ReqDescUnion->Words = 0;
	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = ReqDescUnion;
	cmd->pkt = pkt;
	cmd->cmd = acmd;

	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
	    uint16_t, acmd->device_id);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
	}

	/* get SCSI_IO raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* zero out SCSI_IO raid message frame */
	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));

	/* Set the ldTargetId set by BuildRaidContext() */
	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
	    acmd->device_id);

	/* Copy CDB to scsi_io_request message frame */
	ddi_rep_put8(acc_handle,
	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	/*
	 * Just the CDB length, rest of the Flags are zero
	 * This will be modified later.
	 */
	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);

	pd_cmd_cdblen = acmd->cmd_cdblen;

	if (acmd->islogical) {

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_READ:
		case SCMD_WRITE:
		case SCMD_READ_G1:
		case SCMD_WRITE_G1:
		case SCMD_READ_G4:
		case SCMD_WRITE_G4:
		case SCMD_READ_G5:
		case SCMD_WRITE_G5:

			/* Initialize sense Information */
			if (cmd->sense1 == NULL) {
				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
				    "Sense buffer ptr NULL "));
			}
			bzero(cmd->sense1, SENSE_LENGTH);
			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));

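			/*
			 * Decode the starting LBA and block count from
			 * the CDB; byte offsets differ per CDB group:
			 *   6-byte  (GROUP0): LBA in bytes 1-3 (21 bits),
			 *           count in byte 4
			 *   10-byte (GROUP1): LBA in bytes 2-5,
			 *           count in bytes 7-8
			 *   12-byte (GROUP5): LBA in bytes 2-5,
			 *           count in bytes 6-9
			 *   16-byte (GROUP4): 64-bit LBA in bytes 2-9,
			 *           count in bytes 10-13
			 */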
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				lba_count =
				    (((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));

				start_lba_lo = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_hi = (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			}

			if (instance->tbolt &&
			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
				dev_err(instance->dip, CE_WARN,
				    "I/O sector count 0x%x exceeds "
				    "controller limit", lba_count);
			}

			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
			    start_lba_lo;
			io_info.numBlocks = lba_count;
			io_info.ldTgtId = acmd->device_id;

			if (acmd->cmd_flags & CFLAG_DMASEND)
				io_info.isRead = 0;
			else
				io_info.isRead = 1;

			/* Acquire SYNC MAP UPDATE lock */
			mutex_enter(&instance->sync_map_mtx);

			local_map_ptr =
			    instance->ld_map[(instance->map_id & 1)];

			if ((MR_TargetIdToLdGet(
			    acmd->device_id, local_map_ptr) >=
			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
				dev_err(instance->dip, CE_NOTE,
				    "Fast Path NOT Possible, "
				    "targetId >= MAX_LOGICAL_DRIVES || "
				    "!instance->fast_path_io");
				fp_possible = 0;
				/* Set Regionlock flags to BYPASS */
				ddi_put8(acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags, 0);
			} else {
				if (MR_BuildRaidContext(instance, &io_info,
				    &scsi_raid_io->RaidContext, local_map_ptr))
					fp_possible = io_info.fpOkForIo;
			}

			if (!enable_fp)
				fp_possible = 0;

			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
			    "instance->fast_path_io %d fp_possible %d",
			    enable_fp, instance->fast_path_io, fp_possible));

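		/*
		 * Two submission paths: with fast path, the command goes
		 * straight to the physical device as an MPI2 SCSI_IO
		 * request using the device handle resolved from the RAID
		 * map; otherwise it is sent as an LD_IO request that the
		 * firmware's RAID layer translates.
		 */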
		if (fp_possible) {

			/* Check for DIF enabled LD */
			if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
				/* Prepare 32 Byte CDB for DIF capable Disk */
				mrsas_tbolt_prepare_cdb(instance,
				    scsi_raid_io->CDB.CDB32,
				    &io_info, scsi_raid_io, start_lba_lo);
			} else {
				mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
				    sizeof (scsi_raid_io->CDB.CDB32),
				    (uint8_t *)&pd_cmd_cdblen,
				    io_info.pdBlock, io_info.numBlocks);
				ddi_put16(acc_handle,
				    &scsi_raid_io->IoFlags, pd_cmd_cdblen);
			}

			ddi_put8(acc_handle, &scsi_raid_io->Function,
			    MPI2_FUNCTION_SCSI_IO_REQUEST);

			ReqDescUnion->SCSIIO.RequestFlags =
			    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

			if (instance->gen3) {
				uint8_t regLockFlags = ddi_get8(acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags);
				uint16_t IoFlags = ddi_get16(acc_handle,
				    &scsi_raid_io->IoFlags);

				if (regLockFlags == REGION_TYPE_UNUSED)
					ReqDescUnion->SCSIIO.RequestFlags =
					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				IoFlags |=
				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
				regLockFlags |=
				    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
				    MR_RL_FLAGS_SEQ_NUM_ENABLE);

				ddi_put8(acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
				ddi_put8(acc_handle,
				    &scsi_raid_io->RaidContext.nsegType,
				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
				    MPI2_TYPE_CUDA));
				ddi_put8(acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags,
				    regLockFlags);
				ddi_put16(acc_handle,
				    &scsi_raid_io->IoFlags, IoFlags);
			}

1702 			if ((instance->load_balance_info[
1703 			    acmd->device_id].loadBalanceFlag) &&
1704 			    (io_info.isRead)) {
1705 				io_info.devHandle =
1706 				    get_updated_dev_handle(&instance->
1707 				    load_balance_info[acmd->device_id],
1708 				    &io_info);
1709 				cmd->load_balance_flag |=
1710 				    MEGASAS_LOAD_BALANCE_FLAG;
1711 			} else {
1712 				cmd->load_balance_flag &=
1713 				    ~MEGASAS_LOAD_BALANCE_FLAG;
1714 			}
1715 
1716 			ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1717 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1718 			    io_info.devHandle);
1719 
1720 		} else { /* FP Not Possible */
1721 
1722 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1723 			    MPI2_FUNCTION_LD_IO_REQUEST);
1724 
1725 			ddi_put16(acc_handle,
1726 			    &scsi_raid_io->DevHandle, acmd->device_id);
1727 
1728 			ReqDescUnion->SCSIIO.RequestFlags =
1729 			    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1730 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1731 
1732 			ddi_put16(acc_handle,
1733 			    &scsi_raid_io->RaidContext.timeoutValue,
1734 			    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1735 
1736 			if (instance->gen3) {
1737 				uint8_t regLockFlags = ddi_get8(acc_handle,
1738 				    &scsi_raid_io->RaidContext.regLockFlags);
1739 
1740 				if (regLockFlags == REGION_TYPE_UNUSED) {
1741 					ReqDescUnion->SCSIIO.RequestFlags =
1742 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1743 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1744 				}
1745 
1746 				regLockFlags |=
1747 				    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1748 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1749 
1750 				ddi_put8(acc_handle,
1751 				    &scsi_raid_io->RaidContext.nsegType,
1752 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1753 				    MPI2_TYPE_CUDA));
1754 				ddi_put8(acc_handle,
1755 				    &scsi_raid_io->RaidContext.regLockFlags,
1756 				    regLockFlags);
1757 			}
1758 		} /* Not FP */
1759 
1760 		/* Release SYNC MAP UPDATE lock */
1761 		mutex_exit(&instance->sync_map_mtx);
1762 
1763 		break;
1764 
1765 		case SCMD_SYNCHRONIZE_CACHE: {
1766 			return_raid_msg_pkt(instance, cmd);
1767 			*cmd_done = 1;
1768 			return (NULL);
1769 		}
1770 
1771 		case SCMD_MODE_SENSE:
1772 		case SCMD_MODE_SENSE_G1: {
1773 			union scsi_cdb	*cdbp;
1774 			uint16_t	page_code;
1775 
1776 			cdbp = (void *)pkt->pkt_cdbp;
1777 			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778 			switch (page_code) {
1779 			case 0x3:
1780 			case 0x4:
1781 				(void) mrsas_mode_sense_build(pkt);
1782 				return_raid_msg_pkt(instance, cmd);
1783 				*cmd_done = 1;
1784 				return (NULL);
1785 			}
1786 			return (cmd);
1787 		}
1788 
1789 		default:
1790 			/* Pass-through command to logical drive */
1791 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1792 			    MPI2_FUNCTION_LD_IO_REQUEST);
1793 			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1794 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1795 			    acmd->device_id);
1796 			ReqDescUnion->SCSIIO.RequestFlags =
1797 			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1798 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1799 			break;
1800 		}
1801 	} else { /* Physical */
1802 		/* Pass-through command to physical drive */
1803 
1804 		/* Acquire SYNC MAP UPDATE lock */
1805 		mutex_enter(&instance->sync_map_mtx);
1806 
1807 		local_map_ptr = instance->ld_map[instance->map_id & 1];
1808 
1809 		ddi_put8(acc_handle, &scsi_raid_io->Function,
1810 		    MPI2_FUNCTION_SCSI_IO_REQUEST);
1811 
1812 		ReqDescUnion->SCSIIO.RequestFlags =
1813 		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1814 		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1815 
1816 		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1817 		    local_map_ptr->raidMap.
1818 		    devHndlInfo[acmd->device_id].curDevHdl);
1819 
1820 		/* Set regLockFlags to REGION_TYPE_BYPASS */
1821 		ddi_put8(acc_handle,
1822 		    &scsi_raid_io->RaidContext.regLockFlags, 0);
1823 		ddi_put64(acc_handle,
1824 		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1825 		ddi_put32(acc_handle,
1826 		    &scsi_raid_io->RaidContext.regLockLength, 0);
1827 		ddi_put8(acc_handle,
1828 		    &scsi_raid_io->RaidContext.RAIDFlags,
1829 		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1830 		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1831 		ddi_put16(acc_handle,
1832 		    &scsi_raid_io->RaidContext.timeoutValue,
1833 		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1834 		ddi_put16(acc_handle,
1835 		    &scsi_raid_io->RaidContext.ldTargetId,
1836 		    acmd->device_id);
1837 		ddi_put8(acc_handle,
1838 		    &scsi_raid_io->LUN[1], acmd->lun);
1839 
1840 		if (instance->fast_path_io && instance->gen3) {
1841 			uint16_t IoFlags = ddi_get16(acc_handle,
1842 			    &scsi_raid_io->IoFlags);
1843 			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1844 			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1845 		}
1846 		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1847 		    local_map_ptr->raidMap.
1848 		    devHndlInfo[acmd->device_id].curDevHdl);
1849 
1850 		/* Release SYNC MAP UPDATE lock */
1851 		mutex_exit(&instance->sync_map_mtx);
1852 	}
1853 
1854 	/* Set sense buffer physical address/length in scsi_io_request. */
1855 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1856 	    cmd->sense_phys_addr1);
1857 	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1858 
1859 	/* Construct SGL */
1860 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1861 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1862 
1863 	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1864 	    scsi_raid_io, &datalen);
1865 
1866 	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1867 
1868 	con_log(CL_ANN, (CE_CONT,
1869 	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1870 	    pkt->pkt_cdbp[0], acmd->device_id));
1871 	con_log(CL_DLEVEL1, (CE_CONT,
1872 	    "data length = %x\n",
1873 	    scsi_raid_io->DataLength));
1874 	con_log(CL_DLEVEL1, (CE_CONT,
1875 	    "cdb length = %x\n",
1876 	    acmd->cmd_cdblen));
1877 
1878 	return (cmd);
1879 }
1880 
1881 uint32_t
1882 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1883 {
1884 	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1885 }
1886 
1887 void
1888 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1889 {
1890 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1891 	atomic_inc_16(&instance->fw_outstanding);
1892 
1893 	struct scsi_pkt *pkt;
1894 
1895 	con_log(CL_ANN1,
1896 	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1897 
1898 	con_log(CL_DLEVEL1, (CE_CONT,
1899 	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1900 	con_log(CL_DLEVEL1, (CE_CONT,
1901 	    " [req desc low part] %x \n",
1902 	    (uint_t)(req_desc->Words & 0xffffffffff)));
1903 	con_log(CL_DLEVEL1, (CE_CONT,
1904 	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1905 	pkt = cmd->pkt;
1906 
1907 	if (pkt) {
1908 		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1909 		    "ISSUED CMD TO FW : called : cmd:"
1910 		    ": %p instance : %p pkt : %p pkt_time : %x\n",
1911 		    gethrtime(), (void *)cmd, (void *)instance,
1912 		    (void *)pkt, cmd->drv_pkt_time));
1913 		if (instance->adapterresetinprogress) {
1914 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1915 			con_log(CL_ANN, (CE_NOTE,
1916 			    "TBOLT Reset the scsi_pkt timer"));
1917 		} else {
1918 			push_pending_mfi_pkt(instance, cmd);
1919 		}
1920 
1921 	} else {
1922 		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1923 		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1924 		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1925 	}
1926 
1927 	/* Issue the command to the FW */
1928 	mutex_enter(&instance->reg_write_mtx);
1929 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1930 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1931 	mutex_exit(&instance->reg_write_mtx);
1932 }
1933 
1934 /*
1935  * issue_cmd_in_sync_mode
1936  */
1937 int
1938 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1939     struct mrsas_cmd *cmd)
1940 {
1941 	int		i;
1942 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1943 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1944 
1945 	struct mrsas_header	*hdr;
1946 	hdr = (struct mrsas_header *)&cmd->frame->hdr;
1947 
1948 	con_log(CL_ANN,
1949 	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1950 	    cmd->SMID));
1951 
1952 
1953 	if (instance->adapterresetinprogress) {
1954 		cmd->drv_pkt_time = ddi_get16
1955 		    (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1956 		if (cmd->drv_pkt_time < debug_timeout_g)
1957 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1958 		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1959 		    "RESET-IN-PROGRESS, issue cmd & return."));
1960 
1961 		mutex_enter(&instance->reg_write_mtx);
1962 		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1963 		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1964 		mutex_exit(&instance->reg_write_mtx);
1965 
1966 		return (DDI_SUCCESS);
1967 	} else {
1968 		con_log(CL_ANN1, (CE_NOTE,
1969 		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1970 		push_pending_mfi_pkt(instance, cmd);
1971 	}
1972 
1973 	con_log(CL_DLEVEL2, (CE_NOTE,
1974 	    "HighQport offset :%p",
1975 	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1976 	con_log(CL_DLEVEL2, (CE_NOTE,
1977 	    "LowQport offset :%p",
1978 	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1979 
1980 	cmd->sync_cmd = MRSAS_TRUE;
1981 	cmd->cmd_status =  ENODATA;
1982 
1983 
1984 	mutex_enter(&instance->reg_write_mtx);
1985 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1986 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1987 	mutex_exit(&instance->reg_write_mtx);
1988 
1989 	con_log(CL_ANN1, (CE_NOTE,
1990 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1991 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1992 	    (uint_t)(req_desc->Words & 0xffffffff)));
1993 
1994 	mutex_enter(&instance->int_cmd_mtx);
1995 	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
1996 		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
1997 	}
1998 	mutex_exit(&instance->int_cmd_mtx);
1999 
2000 
2001 	if (i < (msecs - 1)) {
2002 		return (DDI_SUCCESS);
2003 	} else {
2004 		return (DDI_FAILURE);
2005 	}
2006 }
2007 
2008 /*
2009  * issue_cmd_in_poll_mode
2010  */
2011 int
2012 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2013     struct mrsas_cmd *cmd)
2014 {
2015 	int		i;
2016 	uint16_t	flags;
2017 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2018 	struct mrsas_header *frame_hdr;
2019 
2020 	con_log(CL_ANN,
2021 	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2022 	    cmd->SMID));
2023 
2024 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2025 
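	/*
	 * Poll-mode handshake, summarizing the steps below: cmd_status in
	 * the MFI frame is pre-set to the poll sentinel (0xFF) and the
	 * DONT_POST_IN_REPLY_QUEUE flag is set, so the FW writes the final
	 * status directly into the frame instead of posting a reply
	 * descriptor; the driver then spins until cmd_status changes or
	 * the timeout expires.
	 */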
2026 	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2027 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2028 	    MFI_CMD_STATUS_POLL_MODE);
2029 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2030 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2031 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2032 
2033 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2034 	    (uint_t)(req_desc->Words & 0xffffffff)));
2035 	con_log(CL_ANN1, (CE_NOTE,
2036 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2037 
2038 	/* issue the frame using inbound queue port */
2039 	mutex_enter(&instance->reg_write_mtx);
2040 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2041 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2042 	mutex_exit(&instance->reg_write_mtx);
2043 
2044 	for (i = 0; i < msecs && (
2045 	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2046 	    == MFI_CMD_STATUS_POLL_MODE); i++) {
2047 		/* wait for cmd_status to change from 0xFF */
2048 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2049 	}
2050 
2051 	DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2052 
2053 	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2054 	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2055 		con_log(CL_ANN1, (CE_NOTE,
2056 		    " cmd failed %" PRIx64, (req_desc->Words)));
2057 		return (DDI_FAILURE);
2058 	}
2059 
2060 	return (DDI_SUCCESS);
2061 }
2062 
2063 void
2064 tbolt_enable_intr(struct mrsas_instance *instance)
2065 {
2066 	/* TODO: For Thunderbolt/Invader also clear intr on enable */
2067 	/* writel(~0, &regs->outbound_intr_status); */
2068 	/* readl(&regs->outbound_intr_status); */
2069 
2070 	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2071 
2072 	/* dummy read to force PCI flush */
2073 	(void) RD_OB_INTR_MASK(instance);
2074 
2075 }
2076 
2077 void
2078 tbolt_disable_intr(struct mrsas_instance *instance)
2079 {
2080 	uint32_t mask = 0xFFFFFFFF;
2081 
2082 	WR_OB_INTR_MASK(mask, instance);
2083 
2084 	/* Dummy readl to force pci flush */
2085 
2086 	(void) RD_OB_INTR_MASK(instance);
2087 }
2088 
2089 
2090 int
2091 tbolt_intr_ack(struct mrsas_instance *instance)
2092 {
2093 	uint32_t	status;
2094 
2095 	/* check if it is our interrupt */
2096 	status = RD_OB_INTR_STATUS(instance);
2097 	con_log(CL_ANN1, (CE_NOTE,
2098 	    "chkpnt: Entered tbolt_intr_ack status = %d", status));
2099 
2100 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2101 		return (DDI_INTR_UNCLAIMED);
2102 	}
2103 
2104 	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2105 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2106 		return (DDI_INTR_UNCLAIMED);
2107 	}
2108 
2109 	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2110 		/* clear the interrupt by writing back the same value */
2111 		WR_OB_INTR_STATUS(status, instance);
2112 		/* dummy READ */
2113 		(void) RD_OB_INTR_STATUS(instance);
2114 	}
2115 	return (DDI_INTR_CLAIMED);
2116 }
2117 
2118 /*
2119  * get_raid_msg_pkt : Get a command from the free pool
2120  * After successful allocation, the caller of this routine
2121  * must clear the frame buffer (memset to zero) before
2122  * using the packet further.
2123  *
2124  * ***** Note *****
2125  * After clearing the frame buffer, the context id of the
2126  * frame buffer SHOULD be restored.
2127  */
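/*
 * A minimal sketch of that clear-and-restore pattern (as done in
 * mrsas_tbolt_sync_map_info() below):
 *
 *	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle,
 *	    &cmd->frame->hdr.context, cmd->index);
 */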
2128 
2129 struct mrsas_cmd *
2130 get_raid_msg_pkt(struct mrsas_instance *instance)
2131 {
2132 	mlist_t			*head = &instance->cmd_pool_list;
2133 	struct mrsas_cmd	*cmd = NULL;
2134 
2135 	mutex_enter(&instance->cmd_pool_mtx);
2136 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2137 
2138 
2139 	if (!mlist_empty(head)) {
2140 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2141 		mlist_del_init(head->next);
2142 	}
2143 	if (cmd != NULL) {
2144 		cmd->pkt = NULL;
2145 		cmd->retry_count_for_ocr = 0;
2146 		cmd->drv_pkt_time = 0;
2147 	}
2148 	mutex_exit(&instance->cmd_pool_mtx);
2149 
2150 	if (cmd != NULL)
2151 		bzero(cmd->scsi_io_request,
2152 		    sizeof (Mpi2RaidSCSIIORequest_t));
2153 	return (cmd);
2154 }
2155 
2156 struct mrsas_cmd *
2157 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2158 {
2159 	mlist_t			*head = &instance->cmd_app_pool_list;
2160 	struct mrsas_cmd	*cmd = NULL;
2161 
2162 	mutex_enter(&instance->cmd_app_pool_mtx);
2163 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2164 
2165 	if (!mlist_empty(head)) {
2166 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2167 		mlist_del_init(head->next);
2168 	}
2169 	if (cmd != NULL) {
2170 		cmd->retry_count_for_ocr = 0;
2171 		cmd->drv_pkt_time = 0;
2172 		cmd->pkt = NULL;
2173 		cmd->request_desc = NULL;
2174 
2175 	}
2176 
2177 	mutex_exit(&instance->cmd_app_pool_mtx);
2178 
2179 	if (cmd != NULL) {
2180 		bzero(cmd->scsi_io_request,
2181 		    sizeof (Mpi2RaidSCSIIORequest_t));
2182 	}
2183 
2184 	return (cmd);
2185 }
2186 
2187 /*
2188  * return_raid_msg_pkt : Return a cmd to free command pool
2189  */
2190 void
2191 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2192 {
2193 	mutex_enter(&instance->cmd_pool_mtx);
2194 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2195 
2196 
2197 	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2198 
2199 	mutex_exit(&instance->cmd_pool_mtx);
2200 }
2201 
2202 void
2203 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2204 {
2205 	mutex_enter(&instance->cmd_app_pool_mtx);
2206 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2207 
2208 	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2209 
2210 	mutex_exit(&instance->cmd_app_pool_mtx);
2211 }
2212 
2213 
2214 void
2215 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2216     struct mrsas_cmd *cmd)
2217 {
2218 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2219 	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
2220 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
2221 	uint32_t			index;
2222 	ddi_acc_handle_t acc_handle =
2223 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2224 
2225 	if (!instance->tbolt) {
2226 		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2227 		return;
2228 	}
2229 
2230 	index = cmd->index;
2231 
2232 	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2233 
2234 	if (!ReqDescUnion) {
2235 		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2236 		return;
2237 	}
2238 
2239 	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2240 
2241 	ReqDescUnion->Words = 0;
2242 
2243 	ReqDescUnion->SCSIIO.RequestFlags =
2244 	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2245 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2246 
2247 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2248 
2249 	cmd->request_desc = ReqDescUnion;
2250 
2251 	/* get raid message frame pointer */
2252 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2253 
2254 	if (instance->gen3) {
2255 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2256 		    &scsi_raid_io->SGL.IeeeChain;
2257 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2258 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2259 	}
2260 
2261 	ddi_put8(acc_handle, &scsi_raid_io->Function,
2262 	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2263 
2264 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2265 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2266 
2267 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2268 	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2269 
2270 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2271 	    cmd->sense_phys_addr1);
2272 
2273 
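	/*
	 * The legacy MFI frame is handed to the firmware as the single
	 * element of an IEEE chain SGE: the SGE carries the frame's
	 * physical address, and the PASSTHRU function code set above
	 * tells the FW to interpret that buffer as an MFI command.
	 */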
2274 	scsi_raid_io_sgl_ieee =
2275 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2276 
2277 	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2278 	    (U64)cmd->frame_phys_addr);
2279 
2280 	ddi_put8(acc_handle,
2281 	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2282 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2283 	/* LSI hardcoded 1024 here instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2284 	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2285 
2286 	con_log(CL_ANN1, (CE_NOTE,
2287 	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
2288 	    scsi_raid_io_sgl_ieee->Address));
2289 	con_log(CL_ANN1, (CE_NOTE,
2290 	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2291 	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2292 	    scsi_raid_io_sgl_ieee->Flags));
2293 }
2294 
2295 
2296 void
2297 tbolt_complete_cmd(struct mrsas_instance *instance,
2298     struct mrsas_cmd *cmd)
2299 {
2300 	uint8_t				status;
2301 	uint8_t				extStatus;
2302 	uint8_t				function;
2303 	uint8_t				arm;
2304 	struct scsa_cmd			*acmd;
2305 	struct scsi_pkt			*pkt;
2306 	struct scsi_arq_status		*arqstat;
2307 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2308 	LD_LOAD_BALANCE_INFO		*lbinfo;
2309 	ddi_acc_handle_t acc_handle =
2310 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2311 
2312 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2313 
2314 	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2315 	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2316 
2317 	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2318 	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2319 
2320 	if (status != MFI_STAT_OK) {
2321 		con_log(CL_ANN, (CE_WARN,
2322 		    "IO Cmd Failed SMID %x", cmd->SMID));
2323 	} else {
2324 		con_log(CL_ANN, (CE_NOTE,
2325 		    "IO Cmd Success  SMID %x", cmd->SMID));
2326 	}
2327 
2328 	/* regular commands */
2329 
2330 	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2331 	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2332 	    uint8_t, status, uint8_t, extStatus);
2333 
2334 	switch (function) {
2335 
2336 	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
2337 		acmd =	(struct scsa_cmd *)cmd->cmd;
2338 		lbinfo = &instance->load_balance_info[acmd->device_id];
2339 
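		/*
		 * If this fast-path read was load balanced across the
		 * RAID-1 mirror arms, determine which arm serviced it and
		 * drop that arm's pending-command count.
		 */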
2340 		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2341 			arm = lbinfo->raid1DevHandle[0] ==
2342 			    scsi_raid_io->DevHandle ? 0 : 1;
2343 
2344 			lbinfo->scsi_pending_cmds[arm]--;
2345 			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2346 		}
2347 		con_log(CL_DLEVEL3, (CE_NOTE,
2348 		    "FastPath IO Completion Success "));
2349 		/* FALLTHRU */
2350 
2351 	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
2352 		acmd =	(struct scsa_cmd *)cmd->cmd;
2353 		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);
2354 
2355 		if (acmd->cmd_flags & CFLAG_DMAVALID) {
2356 			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2357 				(void) ddi_dma_sync(acmd->cmd_dmahandle,
2358 				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
2359 				    DDI_DMA_SYNC_FORCPU);
2360 			}
2361 		}
2362 
2363 		pkt->pkt_reason		= CMD_CMPLT;
2364 		pkt->pkt_statistics	= 0;
2365 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2366 		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2367 
2368 		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2369 		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2370 		    ((acmd->islogical) ? "LD" : "PD"),
2371 		    acmd->cmd_dmacount, cmd->SMID, status));
2372 
2373 		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2374 			struct scsi_inquiry	*inq;
2375 
2376 			if (acmd->cmd_dmacount != 0) {
2377 				bp_mapin(acmd->cmd_buf);
2378 				inq = (struct scsi_inquiry *)
2379 				    acmd->cmd_buf->b_un.b_addr;
2380 
2381 				/* don't expose physical drives to OS */
2382 				if (acmd->islogical &&
2383 				    (status == MFI_STAT_OK)) {
2384 					display_scsi_inquiry((caddr_t)inq);
2385 				} else if ((status == MFI_STAT_OK) &&
2386 				    inq->inq_dtype == DTYPE_DIRECT) {
2387 					display_scsi_inquiry((caddr_t)inq);
2388 				} else {
2389 					/* for physical disk */
2390 					status = MFI_STAT_DEVICE_NOT_FOUND;
2391 				}
2392 			}
2393 		}
2394 
2395 		switch (status) {
2396 		case MFI_STAT_OK:
2397 			pkt->pkt_scbp[0] = STATUS_GOOD;
2398 			break;
2399 		case MFI_STAT_LD_CC_IN_PROGRESS:
2400 		case MFI_STAT_LD_RECON_IN_PROGRESS:
2401 			pkt->pkt_scbp[0] = STATUS_GOOD;
2402 			break;
2403 		case MFI_STAT_LD_INIT_IN_PROGRESS:
2404 			pkt->pkt_reason	= CMD_TRAN_ERR;
2405 			break;
2406 		case MFI_STAT_SCSI_IO_FAILED:
2407 			dev_err(instance->dip, CE_WARN,
2408 			    "tbolt_complete_cmd: scsi_io failed");
2409 			pkt->pkt_reason	= CMD_TRAN_ERR;
2410 			break;
2411 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
2412 			con_log(CL_ANN, (CE_WARN,
2413 			    "tbolt_complete_cmd: scsi_done with error"));
2414 
2415 			pkt->pkt_reason	= CMD_CMPLT;
2416 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2417 
2418 			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2419 				con_log(CL_ANN,
2420 				    (CE_WARN, "TEST_UNIT_READY fail"));
2421 			} else {
2422 				pkt->pkt_state |= STATE_ARQ_DONE;
2423 				arqstat = (void *)(pkt->pkt_scbp);
2424 				arqstat->sts_rqpkt_reason = CMD_CMPLT;
2425 				arqstat->sts_rqpkt_resid = 0;
2426 				arqstat->sts_rqpkt_state |=
2427 				    STATE_GOT_BUS | STATE_GOT_TARGET
2428 				    | STATE_SENT_CMD
2429 				    | STATE_XFERRED_DATA;
2430 				*(uint8_t *)&arqstat->sts_rqpkt_status =
2431 				    STATUS_GOOD;
2432 				con_log(CL_ANN1,
2433 				    (CE_NOTE, "Copying Sense data %x",
2434 				    cmd->SMID));
2435 
2436 				ddi_rep_get8(acc_handle,
2437 				    (uint8_t *)&(arqstat->sts_sensedata),
2438 				    cmd->sense1,
2439 				    sizeof (struct scsi_extended_sense),
2440 				    DDI_DEV_AUTOINCR);
2441 
2442 			}
2443 			break;
2444 		case MFI_STAT_LD_OFFLINE:
2445 			dev_err(instance->dip, CE_WARN,
2446 			    "tbolt_complete_cmd: ld offline "
2447 			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2448 			    /* UNDO: */
2449 			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2450 
2451 			    ddi_get16(acc_handle,
2452 			    &scsi_raid_io->RaidContext.ldTargetId),
2453 
2454 			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2455 
2456 			pkt->pkt_reason	= CMD_DEV_GONE;
2457 			pkt->pkt_statistics  = STAT_DISCON;
2458 			break;
2459 		case MFI_STAT_DEVICE_NOT_FOUND:
2460 			con_log(CL_ANN, (CE_CONT,
2461 			    "tbolt_complete_cmd: device not found error"));
2462 			pkt->pkt_reason	= CMD_DEV_GONE;
2463 			pkt->pkt_statistics  = STAT_DISCON;
2464 			break;
2465 
2466 		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2467 			pkt->pkt_state |= STATE_ARQ_DONE;
2468 			pkt->pkt_reason	= CMD_CMPLT;
2469 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2470 
2471 			arqstat = (void *)(pkt->pkt_scbp);
2472 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
2473 			arqstat->sts_rqpkt_resid = 0;
2474 			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2475 			    | STATE_GOT_TARGET | STATE_SENT_CMD
2476 			    | STATE_XFERRED_DATA;
2477 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2478 
2479 			arqstat->sts_sensedata.es_valid = 1;
2480 			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2481 			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2482 
2483 			/*
2484 			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2485 			 * ASC: 0x21h; ASCQ: 0x00h;
2486 			 */
2487 			arqstat->sts_sensedata.es_add_code = 0x21;
2488 			arqstat->sts_sensedata.es_qual_code = 0x00;
2489 			break;
2490 		case MFI_STAT_INVALID_CMD:
2491 		case MFI_STAT_INVALID_DCMD:
2492 		case MFI_STAT_INVALID_PARAMETER:
2493 		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2494 		default:
2495 			dev_err(instance->dip, CE_WARN,
2496 			    "tbolt_complete_cmd: Unknown status!");
2497 			pkt->pkt_reason	= CMD_TRAN_ERR;
2498 
2499 			break;
2500 		}
2501 
2502 		atomic_add_16(&instance->fw_outstanding, (-1));
2503 
2504 		(void) mrsas_common_check(instance, cmd);
2505 		if (acmd->cmd_dmahandle) {
2506 			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2507 			    DDI_SUCCESS) {
2508 				ddi_fm_service_impact(instance->dip,
2509 				    DDI_SERVICE_UNAFFECTED);
2510 				pkt->pkt_reason = CMD_TRAN_ERR;
2511 				pkt->pkt_statistics = 0;
2512 			}
2513 		}
2514 
2515 		/* Call the callback routine */
2516 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2517 			(*pkt->pkt_comp)(pkt);
2518 
2519 		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2520 
2521 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2522 
2523 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2524 
2525 		return_raid_msg_pkt(instance, cmd);
2526 		break;
2527 	}
2528 	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	 /* MFA command. */
2529 
2530 		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2531 		    cmd->frame->dcmd.mbox.b[1] == 1) {
2532 
2533 			mutex_enter(&instance->sync_map_mtx);
2534 
2535 			con_log(CL_ANN, (CE_NOTE,
2536 			    "LDMAP sync command	SMID RECEIVED 0x%X",
2537 			    cmd->SMID));
2538 			if (cmd->frame->hdr.cmd_status != 0) {
2539 				dev_err(instance->dip, CE_WARN,
2540 				    "map sync failed, status = 0x%x.",
2541 				    cmd->frame->hdr.cmd_status);
2542 			} else {
2543 				instance->map_id++;
2544 				con_log(CL_ANN1, (CE_NOTE,
2545 				    "map sync received, switched map_id to %"
2546 				    PRIu64, instance->map_id));
2547 			}
2548 
2549 			if (MR_ValidateMapInfo(
2550 			    instance->ld_map[instance->map_id & 1],
2551 			    instance->load_balance_info)) {
2552 				instance->fast_path_io = 1;
2553 			} else {
2554 				instance->fast_path_io = 0;
2555 			}
2556 
2557 			con_log(CL_ANN, (CE_NOTE,
2558 			    "instance->fast_path_io %d",
2559 			    instance->fast_path_io));
2560 
2561 			instance->unroll.syncCmd = 0;
2562 
2563 			if (instance->map_update_cmd == cmd) {
2564 				return_raid_msg_pkt(instance, cmd);
2565 				atomic_add_16(&instance->fw_outstanding, (-1));
2566 				(void) mrsas_tbolt_sync_map_info(instance);
2567 			}
2568 
2569 			con_log(CL_ANN1, (CE_NOTE,
2570 			    "LDMAP sync completed, ldcount=%d",
2571 			    instance->ld_map[instance->map_id & 1]
2572 			    ->raidMap.ldCount));
2573 			mutex_exit(&instance->sync_map_mtx);
2574 			break;
2575 		}
2576 
2577 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2578 			con_log(CL_ANN1, (CE_CONT,
2579 			    "AEN command SMID RECEIVED 0x%X",
2580 			    cmd->SMID));
2581 			if ((instance->aen_cmd == cmd) &&
2582 			    (instance->aen_cmd->abort_aen)) {
2583 				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2584 				    "aborted_aen returned"));
2585 			} else {
2586 				atomic_add_16(&instance->fw_outstanding, (-1));
2587 				service_mfi_aen(instance, cmd);
2588 			}
2589 		}
2590 
2591 		if (cmd->sync_cmd == MRSAS_TRUE) {
2592 			con_log(CL_ANN1, (CE_CONT,
2593 			    "Sync-mode Command Response SMID RECEIVED 0x%X",
2594 			    cmd->SMID));
2595 
2596 			tbolt_complete_cmd_in_sync_mode(instance, cmd);
2597 		} else {
2598 			con_log(CL_ANN, (CE_CONT,
2599 			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2600 			    cmd->SMID));
2601 		}
2602 		break;
2603 	default:
2604 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2605 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2606 
2607 		/* free message */
2608 		con_log(CL_ANN,
2609 		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2610 		break;
2611 	}
2612 }
2613 
2614 uint_t
2615 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2616 {
2617 	uint8_t				replyType;
2618 	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2619 	Mpi2ReplyDescriptorsUnion_t	*desc;
2620 	uint16_t			smid;
2621 	union desc_value		d_val;
2622 	struct mrsas_cmd		*cmd;
2623 
2624 	struct mrsas_header	*hdr;
2625 	struct scsi_pkt		*pkt;
2626 
2627 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2628 	    0, 0, DDI_DMA_SYNC_FORDEV);
2629 
2630 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2631 	    0, 0, DDI_DMA_SYNC_FORCPU);
2632 
2633 	desc = instance->reply_frame_pool;
2634 	desc += instance->reply_read_index;
2635 
2636 	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2637 	replyType = replyDesc->ReplyFlags &
2638 	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2639 
2640 	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2641 		return (DDI_INTR_UNCLAIMED);
2642 
2643 	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2644 	    != DDI_SUCCESS) {
2645 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2646 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2647 		con_log(CL_ANN1,
2648 		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2649 		    "FMA check, returning DDI_INTR_UNCLAIMED"));
2650 		return (DDI_INTR_CLAIMED);
2651 	}
2652 
2653 	con_log(CL_ANN1, (CE_NOTE, "Reply Desc	= %p  Words = %" PRIx64,
2654 	    (void *)desc, desc->Words));
2655 
2656 	d_val.word = desc->Words;
2657 
2658 
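	/*
	 * Reply-ring convention used below: every descriptor starts out
	 * as all 1s; the FW posts completions by overwriting slots in
	 * order.  The driver consumes entries until it reaches an all-1s
	 * (unused) descriptor, resets each consumed slot back to all 1s,
	 * wraps at reply_q_depth, and finally publishes the new read
	 * index through the reply post index register.
	 */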
2659 	/* Read Reply descriptor */
2660 	while ((d_val.u1.low != 0xffffffff) &&
2661 	    (d_val.u1.high != 0xffffffff)) {
2662 
2663 		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2664 		    0, 0, DDI_DMA_SYNC_FORCPU);
2665 
2666 		smid = replyDesc->SMID;
2667 
2668 		if (!smid || smid > instance->max_fw_cmds + 1) {
2669 			con_log(CL_ANN1, (CE_NOTE,
2670 			    "Reply Desc at Break  = %p	Words = %" PRIx64,
2671 			    (void *)desc, desc->Words));
2672 			break;
2673 		}
2674 
2675 		cmd	= instance->cmd_list[smid - 1];
2676 		if (!cmd) {
2677 			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2678 			    "outstanding_cmd: Invalid command "
2679 			    " or Poll commad Received in completion path"));
2680 		} else {
2681 			mutex_enter(&instance->cmd_pend_mtx);
2682 			if (cmd->sync_cmd == MRSAS_TRUE) {
2683 				hdr = (struct mrsas_header *)&cmd->frame->hdr;
2684 				if (hdr) {
2685 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2686 					    "tbolt_process_outstanding_cmd:"
2687 					    " mlist_del_init(&cmd->list)."));
2688 					mlist_del_init(&cmd->list);
2689 				}
2690 			} else {
2691 				pkt = cmd->pkt;
2692 				if (pkt) {
2693 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2694 					    "tbolt_process_outstanding_cmd:"
2695 					    "mlist_del_init(&cmd->list)."));
2696 					mlist_del_init(&cmd->list);
2697 				}
2698 			}
2699 
2700 			mutex_exit(&instance->cmd_pend_mtx);
2701 
2702 			tbolt_complete_cmd(instance, cmd);
2703 		}
2704 		/* set it back to all 1s. */
2705 		desc->Words = -1LL;
2706 
2707 		instance->reply_read_index++;
2708 
2709 		if (instance->reply_read_index >= (instance->reply_q_depth)) {
2710 			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2711 			instance->reply_read_index = 0;
2712 		}
2713 
2714 		/* Get the next reply descriptor */
2715 		if (!instance->reply_read_index)
2716 			desc = instance->reply_frame_pool;
2717 		else
2718 			desc++;
2719 
2720 		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 
2722 		d_val.word = desc->Words;
2723 
2724 		con_log(CL_ANN1, (CE_NOTE,
2725 		    "Next Reply Desc  = %p Words = %" PRIx64,
2726 		    (void *)desc, desc->Words));
2727 
2728 		replyType = replyDesc->ReplyFlags &
2729 		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2730 
2731 		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2732 			break;
2733 
2734 	} /* End of while loop. */
2735 
2736 	/* update replyIndex to FW */
2737 	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2738 
2739 
2740 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2741 	    0, 0, DDI_DMA_SYNC_FORDEV);
2742 
2743 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2744 	    0, 0, DDI_DMA_SYNC_FORCPU);
2745 	return (DDI_INTR_CLAIMED);
2746 }
2747 
2748 
2749 
2750 
2751 /*
2752  * complete_cmd_in_sync_mode -	Completes an internal command
2753  * @instance:			Adapter soft state
2754  * @cmd:			Command to be completed
2755  *
2756  * The issue_cmd_in_sync_mode() function waits for a command to complete
2757 2757  * after it issues a command.  This function wakes up that waiter by
2758 2758  * broadcasting on the int_cmd_cv condition variable.
2759  */
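/*
 * The waiting side, in tbolt_issue_cmd_in_sync_mode() above, is
 * essentially (sketch):
 *
 *	mutex_enter(&instance->int_cmd_mtx);
 *	while (cmd->cmd_status == ENODATA)
 *		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
 *	mutex_exit(&instance->int_cmd_mtx);
 *
 * so the cv_broadcast() below is what releases it.
 */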
2760 void
2761 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2762     struct mrsas_cmd *cmd)
2763 {
2764 
2765 	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2766 	    &cmd->frame->io.cmd_status);
2767 
2768 	cmd->sync_cmd = MRSAS_FALSE;
2769 
2770 	mutex_enter(&instance->int_cmd_mtx);
2771 	if (cmd->cmd_status == ENODATA) {
2772 		cmd->cmd_status = 0;
2773 	}
2774 	cv_broadcast(&instance->int_cmd_cv);
2775 	mutex_exit(&instance->int_cmd_mtx);
2776 
2777 }
2778 
2779 /*
2780  * mrsas_tbolt_get_ld_map_info -	Returns the FW's ld_map structure
2781  * instance:				Adapter soft state
2782  *
2783  * Issues an internal command (DCMD) to fetch the FW's LD/RAID map
2784  * structure.  This information is mainly used to decide whether
2785  * fast-path I/O is possible.
2786  */
2787 int
2788 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2789 {
2790 	int ret = 0;
2791 	struct mrsas_cmd	*cmd = NULL;
2792 	struct mrsas_dcmd_frame	*dcmd;
2793 	MR_FW_RAID_MAP_ALL *ci;
2794 	uint32_t ci_h = 0;
2795 	U32 size_map_info;
2796 
2797 	cmd = get_raid_msg_pkt(instance);
2798 
2799 	if (cmd == NULL) {
2800 		dev_err(instance->dip, CE_WARN,
2801 		    "Failed to get a cmd from free-pool in get_ld_map_info()");
2802 		return (DDI_FAILURE);
2803 	}
2804 
2805 	dcmd = &cmd->frame->dcmd;
2806 
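	/*
	 * MR_FW_RAID_MAP already embeds the first MR_LD_SPAN_MAP, so only
	 * the remaining (MAX_LOGICAL_DRIVES - 1) span maps are added to
	 * the transfer size (the usual flexible-array idiom; the same
	 * computation appears in mrsas_tbolt_sync_map_info()).
	 */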
2807 	size_map_info =	sizeof (MR_FW_RAID_MAP) +
2808 	    (sizeof (MR_LD_SPAN_MAP) *
2809 	    (MAX_LOGICAL_DRIVES - 1));
2810 
2811 	con_log(CL_ANN, (CE_NOTE,
2812 	    "size_map_info : 0x%x", size_map_info));
2813 
2814 	ci = instance->ld_map[instance->map_id & 1];
2815 	ci_h = instance->ld_map_phy[instance->map_id & 1];
2816 
2817 	if (!ci) {
2818 		dev_err(instance->dip, CE_WARN,
2819 		    "Failed to alloc mem for ld_map_info");
2820 		return_raid_msg_pkt(instance, cmd);
2821 		return (-1);
2822 	}
2823 
2824 	bzero(ci, sizeof (*ci));
2825 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2826 
2827 	dcmd->cmd = MFI_CMD_OP_DCMD;
2828 	dcmd->cmd_status = 0xFF;
2829 	dcmd->sge_count = 1;
2830 	dcmd->flags = MFI_FRAME_DIR_READ;
2831 	dcmd->timeout = 0;
2832 	dcmd->pad_0 = 0;
2833 	dcmd->data_xfer_len = size_map_info;
2834 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2835 	dcmd->sgl.sge32[0].phys_addr = ci_h;
2836 	dcmd->sgl.sge32[0].length = size_map_info;
2837 
2838 
2839 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2840 
2841 	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2842 		ret = 0;
2843 		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2844 	} else {
2845 		dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2846 		ret = -1;
2847 	}
2848 
2849 	return_raid_msg_pkt(instance, cmd);
2850 
2851 	return (ret);
2852 }
2853 
2854 void
2855 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2856 {
2857 	uint32_t i;
2858 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2859 	union desc_value d_val;
2860 
2861 	reply_desc = instance->reply_frame_pool;
2862 
2863 	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2864 		d_val.word = reply_desc->Words;
2865 		con_log(CL_DLEVEL3, (CE_NOTE,
2866 		    "i=%d, %x:%x",
2867 		    i, d_val.u1.high, d_val.u1.low));
2868 	}
2869 }
2870 
2871 /*
2872  * mrsas_tbolt_prepare_cdb -	Create the CDB for fast path.
2873  * @io_info:	MegaRAID IO request packet pointer.
2874  * @ref_tag:	Reference tag for RD/WRPROTECT
2875  *
2876  * Create the 32-byte DIF-protected CDB for fast path.
2877  */
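/*
 * Layout of the 32-byte variable-length CDB built here (derived from
 * the assignments below):
 *
 *	byte   0	opcode (MRSAS_SCSI_VARIABLE_LENGTH_CMD)
 *	byte   7	additional CDB length (MRSAS_SCSI_ADDL_CDB_LEN)
 *	byte   9	service action (READ32/WRITE32)
 *	byte  10	RD/WR protect
 *	bytes 12-19	64-bit LBA, big-endian
 *	bytes 28-31	32-bit transfer length, big-endian
 */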
2878 void
2879 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2880     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2881     U32 ref_tag)
2882 {
2883 	uint16_t		EEDPFlags;
2884 	uint32_t		Control;
2885 	ddi_acc_handle_t acc_handle =
2886 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2887 
2888 	/* Prepare 32-byte CDB if DIF is supported on this device */
2889 	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2890 
2891 	bzero(cdb, 32);
2892 
2893 	cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2894 
2895 
2896 	cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2897 
2898 	if (io_info->isRead)
2899 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2900 	else
2901 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2902 
2903 	/* As in the Linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2904 	cdb[10] = MRSAS_RD_WR_PROTECT;
2905 
2906 	/* LOGICAL BLOCK ADDRESS */
2907 	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2908 	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2909 	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2910 	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2911 	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2912 	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2913 	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2914 	cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2915 
2916 	/* Logical block reference tag */
2917 	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2918 	    BE_32(ref_tag));
2919 
2920 	ddi_put16(acc_handle,
2921 	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2922 
2923 	ddi_put32(acc_handle, &scsi_io_request->DataLength,
2924 	    ((io_info->numBlocks)*512));
2925 	/* Specify 32-byte cdb */
2926 	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2927 
2928 	/* Transfer length */
2929 	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2930 	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2931 	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2932 	cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2933 
2934 	/* set SCSI IO EEDPFlags */
2935 	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2936 	Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2937 
2938 	/* set SCSI IO EEDPFlags bits */
2939 	if (io_info->isRead) {
2940 		/*
2941 		 * For READ commands, the EEDPFlags shall be set to specify to
2942 		 * Increment the Primary Reference Tag, to Check the Reference
2943 		 * Tag, and to Check and Remove the Protection Information
2944 		 * fields.
2945 		 */
2946 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2947 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	|
2948 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	|
2949 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	|
2950 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2951 	} else {
2952 		/*
2953 		 * For WRITE commands, the EEDPFlags shall be set to specify to
2954 		 * Increment the Primary Reference Tag, and to Insert
2955 		 * Protection Information fields.
2956 		 */
2957 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2958 		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2959 	}
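	/*
	 * Per the MPI2 spec, bits 31:26 of the SCSI IO Control field
	 * (the AddCDBLen field) hold the additional CDB length in 4-byte
	 * words; 0x4 << 26 below therefore declares 16 CDB bytes beyond
	 * the first 16, matching the 32-byte CDB built above.
	 */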
2960 	Control |= (0x4 << 26);
2961 
2962 	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2963 	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2964 	ddi_put32(acc_handle,
2965 	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2966 }
2967 
2968 
2969 /*
2970  * mrsas_tbolt_set_pd_lba -	Sets PD LBA
2971  * @cdb:		CDB
2972  * @cdb_size:		CDB size
2973  * @cdb_len_ptr:	cdb length
2974  * @start_blk:		Start block of IO
2975  * @num_blocks:		Number of blocks
2976  *
2977  * Used to set the PD LBA in CDB for FP IOs
2978  */
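/*
 * Conversion rules applied below, before the LBA is loaded:
 *
 *	12/16-byte CDB with LBA <= 0xffffffff	-> rebuilt as 10-byte
 *	<16-byte CDB with LBA > 0xffffffff	-> rebuilt as 16-byte
 *	6-byte CDB with LBA > 0x1fffff		-> rebuilt as 10-byte
 *
 * The (possibly updated) CDB length then selects how the LBA and
 * transfer length are laid out in the CDB.
 */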
2979 static void
2980 mrsas_tbolt_set_pd_lba(U8 *cdb, size_t cdb_size, uint8_t *cdb_len_ptr,
2981     U64 start_blk, U32 num_blocks)
2982 {
2983 	U8 cdb_len = *cdb_len_ptr;
2984 	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2985 
2986 	/* Some drives don't support 16/12 byte CDBs; convert to 10-byte. */
2987 	if (((cdb_len == 12) || (cdb_len == 16)) &&
2988 	    (start_blk <= 0xffffffff)) {
2989 		if (cdb_len == 16) {
2990 			con_log(CL_ANN,
2991 			    (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2992 			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2993 			flagvals = cdb[1];
2994 			groupnum = cdb[14];
2995 			control = cdb[15];
2996 		} else {
2997 			con_log(CL_ANN,
2998 			    (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
2999 			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3000 			flagvals = cdb[1];
3001 			groupnum = cdb[10];
3002 			control = cdb[11];
3003 		}
3004 
3005 		bzero(cdb, cdb_size);
3006 
3007 		cdb[0] = opcode;
3008 		cdb[1] = flagvals;
3009 		cdb[6] = groupnum;
3010 		cdb[9] = control;
3011 		/* Set transfer length */
3012 		cdb[8] = (U8)(num_blocks & 0xff);
3013 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3014 		cdb_len = 10;
3015 	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3016 		/* Convert to 16 byte CDB for large LBAs */
3017 		con_log(CL_ANN,
3018 		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3019 		switch (cdb_len) {
3020 		case 6:
3021 			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3022 			control = cdb[5];
3023 			break;
3024 		case 10:
3025 			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3026 			flagvals = cdb[1];
3027 			groupnum = cdb[6];
3028 			control = cdb[9];
3029 			break;
3030 		case 12:
3031 			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3032 			flagvals = cdb[1];
3033 			groupnum = cdb[10];
3034 			control = cdb[11];
3035 			break;
3036 		}
3037 
3038 		bzero(cdb, cdb_size);
3039 
3040 		cdb[0] = opcode;
3041 		cdb[1] = flagvals;
3042 		cdb[14] = groupnum;
3043 		cdb[15] = control;
3044 
3045 		/* Transfer length */
3046 		cdb[13] = (U8)(num_blocks & 0xff);
3047 		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3048 		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3049 		cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3050 
3051 		/* Specify 16-byte cdb */
3052 		cdb_len = 16;
3053 	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3054 		/* convert to 10 byte CDB */
3055 		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3056 		control = cdb[5];
3057 
3058 		bzero(cdb, cdb_size);
3059 		cdb[0] = opcode;
3060 		cdb[9] = control;
3061 
3062 		/* Set transfer length */
3063 		cdb[8] = (U8)(num_blocks & 0xff);
3064 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3065 
3066 		/* Specify 10-byte cdb */
3067 		cdb_len = 10;
3068 	}
3069 
3070 
3071 	/* Fall through to the normal case and just load the LBA here. */
3072 	switch (cdb_len) {
3073 	case 6:
3074 	{
3075 		U8 val = cdb[1] & 0xE0;
3076 		cdb[3] = (U8)(start_blk & 0xff);
3077 		cdb[2] = (U8)((start_blk >> 8) & 0xff);
3078 		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3079 		break;
3080 	}
3081 	case 10:
3082 		cdb[5] = (U8)(start_blk & 0xff);
3083 		cdb[4] = (U8)((start_blk >> 8) & 0xff);
3084 		cdb[3] = (U8)((start_blk >> 16) & 0xff);
3085 		cdb[2] = (U8)((start_blk >> 24) & 0xff);
3086 		break;
3087 	case 12:
3088 		cdb[5]	  = (U8)(start_blk & 0xff);
3089 		cdb[4]	  = (U8)((start_blk >> 8) & 0xff);
3090 		cdb[3]	  = (U8)((start_blk >> 16) & 0xff);
3091 		cdb[2]	  = (U8)((start_blk >> 24) & 0xff);
3092 		break;
3093 
3094 	case 16:
3095 		cdb[9]	= (U8)(start_blk & 0xff);
3096 		cdb[8]	= (U8)((start_blk >> 8) & 0xff);
3097 		cdb[7]	= (U8)((start_blk >> 16) & 0xff);
3098 		cdb[6]	= (U8)((start_blk >> 24) & 0xff);
3099 		cdb[5]	= (U8)((start_blk >> 32) & 0xff);
3100 		cdb[4]	= (U8)((start_blk >> 40) & 0xff);
3101 		cdb[3]	= (U8)((start_blk >> 48) & 0xff);
3102 		cdb[2]	= (U8)((start_blk >> 56) & 0xff);
3103 		break;
3104 	}
3105 
3106 	*cdb_len_ptr = cdb_len;
3107 }
3108 
3109 
3110 static int
3111 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3112 {
3113 	MR_FW_RAID_MAP_ALL *ld_map;
3114 
3115 	if (!mrsas_tbolt_get_ld_map_info(instance)) {
3116 
3117 		ld_map = instance->ld_map[instance->map_id & 1];
3118 
3119 		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3120 		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3121 
3122 		if (MR_ValidateMapInfo(
3123 		    instance->ld_map[instance->map_id & 1],
3124 		    instance->load_balance_info)) {
3125 			con_log(CL_ANN,
3126 			    (CE_CONT, "MR_ValidateMapInfo success"));
3127 
3128 			instance->fast_path_io = 1;
3129 			con_log(CL_ANN,
3130 			    (CE_NOTE, "instance->fast_path_io %d",
3131 			    instance->fast_path_io));
3132 
3133 			return (DDI_SUCCESS);
3134 		}
3135 
3136 	}
3137 
3138 	instance->fast_path_io = 0;
3139 	dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3140 	con_log(CL_ANN, (CE_NOTE,
3141 	    "instance->fast_path_io %d", instance->fast_path_io));
3142 
3143 	return (DDI_FAILURE);
3144 }
3145 
3146 /*
3147  * Marks HBA as bad. This will be called either when an
3148  * IO packet times out even after 3 FW resets,
3149  * or the FW is found to be faulted even after 3 consecutive resets.
3150  */
3151 
3152 void
3153 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3154 {
3155 	dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3156 
3157 	if (instance->deadadapter == 1)
3158 		return;
3159 
3160 	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3161 	    "Writing to doorbell with MFI_STOP_ADP "));
3162 	mutex_enter(&instance->ocr_flags_mtx);
3163 	instance->deadadapter = 1;
3164 	mutex_exit(&instance->ocr_flags_mtx);
3165 	instance->func_ptr->disable_intr(instance);
3166 	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3167 	/* Flush */
3168 	(void) RD_RESERVED0_REGISTER(instance);
3169 
3170 	(void) mrsas_print_pending_cmds(instance);
3171 	(void) mrsas_complete_pending_cmds(instance);
3172 }
3173 
3174 void
3175 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3176 {
3177 	int i;
3178 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3179 	instance->reply_read_index = 0;
3180 
3181 	/* Initialize all reply descriptors to the all-ones (unused) value. */
3182 	reply_desc = instance->reply_frame_pool;
3183 
3184 	for (i = 0; i < instance->reply_q_depth; i++) {
3185 		reply_desc->Words = (uint64_t)~0;
3186 		reply_desc++;
3187 	}
3188 }
3189 
3190 int
3191 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3192 {
3193 	uint32_t status = 0x00;
3194 	uint32_t retry = 0;
3195 	uint32_t cur_abs_reg_val;
3196 	uint32_t fw_state;
3197 	uint32_t abs_state;
3198 	uint32_t i;
3199 
3200 	if (instance->deadadapter == 1) {
3201 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3202 		    "no more resets as HBA has been marked dead");
3203 		return (DDI_FAILURE);
3204 	}
3205 
3206 	mutex_enter(&instance->ocr_flags_mtx);
3207 	instance->adapterresetinprogress = 1;
3208 	mutex_exit(&instance->ocr_flags_mtx);
3209 
3210 	instance->func_ptr->disable_intr(instance);
3211 
3212 	/* Add delay in order to complete the ioctl & io cmds in-flight */
3213 	for (i = 0; i < 3000; i++)
3214 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3215 
3216 	instance->reply_read_index = 0;
3217 
3218 retry_reset:
3219 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Resetting TBOLT"));
3220 
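	/*
	 * The 0xF, 0x4, 0xB, 0x2, 0x7, 0xD sequence written below is the
	 * standard MPI2 host-diagnostic unlock key; once the firmware
	 * accepts it, the diag write enable (DRWE) bit checked further
	 * down is set and the reset can be issued.
	 */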
3221 	/* Flush */
3222 	WR_TBOLT_IB_WRITE_SEQ(0x0, instance);
3223 	/* Write magic number */
3224 	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3225 	WR_TBOLT_IB_WRITE_SEQ(0x4, instance);
3226 	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3227 	WR_TBOLT_IB_WRITE_SEQ(0x2, instance);
3228 	WR_TBOLT_IB_WRITE_SEQ(0x7, instance);
3229 	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3230 
3231 	con_log(CL_ANN1, (CE_NOTE,
3232 	    "mrsas_tbolt_reset_ppc: magic number written "
3233 	    "to write sequence register"));
3234 
3235 	/* Wait for the diag write enable (DRWE) bit to be set */
3236 	retry = 0;
3237 	status = RD_TBOLT_HOST_DIAG(instance);
3238 	while (!(status & DIAG_WRITE_ENABLE)) {
3239 		delay(100 * drv_usectohz(MILLISEC));
3240 		status = RD_TBOLT_HOST_DIAG(instance);
3241 		if (retry++ >= 100) {
3242 			dev_err(instance->dip, CE_WARN,
3243 			    "%s(): timeout waiting for DRWE.", __func__);
3244 			return (DDI_FAILURE);
3245 		}
3246 	}
3247 
3248 	/* Send reset command */
3249 	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3250 	delay(100 * drv_usectohz(MILLISEC));
3251 
3252 	/* Wait for reset bit to clear */
3253 	retry = 0;
3254 	status = RD_TBOLT_HOST_DIAG(instance);
3255 	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3256 		delay(100 * drv_usectohz(MILLISEC));
3257 		status = RD_TBOLT_HOST_DIAG(instance);
3258 		if (retry++ == 100) {
3259 			/* Don't call kill adapter here; */
3260 			/* the RESET ADAPTER bit is cleared by firmware. */
3261 			/* mrsas_tbolt_kill_adapter(instance); */
3262 			dev_err(instance->dip, CE_WARN,
3263 			    "%s(): RESET FAILED; return failure!!!", __func__);
3264 			return (DDI_FAILURE);
3265 		}
3266 	}
3267 
3268 	con_log(CL_ANN,
3269 	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3270 
3271 	abs_state = instance->func_ptr->read_fw_status_reg(instance);
3272 	retry = 0;
3273 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3274 		delay(100 * drv_usectohz(MILLISEC));
3275 		abs_state = instance->func_ptr->read_fw_status_reg(instance);
3276 	}
3277 	if (abs_state <= MFI_STATE_FW_INIT) {
3278 		dev_err(instance->dip, CE_WARN,
3279 		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3280 		    "state = 0x%x, RETRY RESET.", abs_state);
3281 		goto retry_reset;
3282 	}
3283 
3284 	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
3285 	if (mfi_state_transition_to_ready(instance) ||
3286 	    mrsas_debug_tbolt_fw_faults_after_ocr == 1) {
3287 		cur_abs_reg_val =
3288 		    instance->func_ptr->read_fw_status_reg(instance);
3289 		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;
3290 
3291 		con_log(CL_ANN1, (CE_NOTE,
3292 		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3293 		    "FW state = 0x%x", fw_state));
3294 		if (mrsas_debug_tbolt_fw_faults_after_ocr == 1)
3295 			fw_state = MFI_STATE_FAULT;
3296 
3297 		con_log(CL_ANN,
3298 		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
3299 		    "FW state = 0x%x", fw_state));
3300 
3301 		if (fw_state == MFI_STATE_FAULT) {
3302 			/* increment the count */
3303 			instance->fw_fault_count_after_ocr++;
3304 			if (instance->fw_fault_count_after_ocr
3305 			    < MAX_FW_RESET_COUNT) {
3306 				dev_err(instance->dip, CE_WARN,
3307 				    "mrsas_tbolt_reset_ppc: "
3308 				    "FW is in fault after OCR count %d "
3309 				    "Retry Reset",
3310 				    instance->fw_fault_count_after_ocr);
3311 				goto retry_reset;
3312 
3313 			} else {
3314 				dev_err(instance->dip, CE_WARN, "%s:"
3315 				    "Max Reset Count exceeded >%d"
3316 				    "Mark HBA as bad, KILL adapter",
3317 				    __func__, MAX_FW_RESET_COUNT);
3318 
3319 				mrsas_tbolt_kill_adapter(instance);
3320 				return (DDI_FAILURE);
3321 			}
3322 		}
3323 	}

	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	mrsas_reset_reply_desc(instance);

	abs_state = mrsas_issue_init_mpi2(instance);
	if (abs_state == (uint32_t)DDI_FAILURE) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "INIT failed; retrying reset");
		goto retry_reset;
	}

	(void) mrsas_print_pending_cmds(instance);

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	(void) mrsas_issue_pending_cmds(instance);

	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);

	dev_err(instance->dip, CE_NOTE, "TBOLT adapter reset successful");

	return (DDI_SUCCESS);
}

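/*
 * Illustrative sketch only (not part of the original driver): decode the
 * absolute FW status register value into its MFI state field, the same
 * masking mrsas_tbolt_reset_ppc() applies above before checking for
 * MFI_STATE_FAULT.  The helper name is hypothetical.
 */
static uint32_t
mrsas_tbolt_fw_state(struct mrsas_instance *instance)
{
	uint32_t abs_reg_val;

	abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
	return (abs_reg_val & MFI_STATE_MASK);
}
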
/*
 * mrsas_tbolt_sync_map_info -	keep the FW's LD map in sync
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) that hands the FW the driver's
 * per-LD target ID and sequence number list, so the FW can notify the
 * driver when the RAID map needs to be refreshed.
 */

static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
	int			ret = 0, i;
	struct mrsas_cmd	*cmd = NULL;
	struct mrsas_dcmd_frame	*dcmd;
	uint32_t size_sync_info, num_lds;
	LD_TARGET_SYNC *ci = NULL;
	MR_FW_RAID_MAP_ALL *map;
	MR_LD_RAID  *raid;
	LD_TARGET_SYNC *ld_sync;
	uint32_t ci_h = 0;
	uint32_t size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool in "
		    "mrsas_tbolt_sync_map_info().");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);
	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));

	map = instance->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;

	con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
	    size_sync_info, num_lds));

	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];

	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];

	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);

	ld_sync = (LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);

		con_log(CL_ANN1,
		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
		    i, raid->seqNum, raid->flags.ldSyncRequired));

		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
		    i, ld_sync->ldTargetId));

		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	ASSERT(num_lds <= 255);
	dcmd->mbox.b[0] = (U8)num_lds;
	dcmd->mbox.b[1] = 1; /* Pend */
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;

	instance->map_update_cmd = cmd;
	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd(cmd, instance);

	instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));

	return (ret);
}

/*
 * abort_syncmap_cmd
 */
int
abort_syncmap_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int	ret = 0;

	struct mrsas_cmd		*cmd;
	struct mrsas_abort_frame	*abort_fr;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));

	cmd = get_raid_msg_mfi_pkt(instance);

	if (!cmd) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool in "
		    "abort_syncmap_cmd().");
		return (DDI_FAILURE);
	}
	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	cmd->frame_count = 1;

	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_syncmap_cmd: issue_cmd_in_poll_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	return_raid_msg_mfi_pkt(instance, cmd);

	atomic_add_16(&instance->fw_outstanding, (-1));

	return (ret);
}

/*
 * Even though these functions were originally intended for the 2208 only,
 * it turns out they're useful for "Skinny" support as well.  In a perfect
 * world, these two functions would live either in mr_sas.c or in their own
 * new source file.  Since this driver needs some cleanup anyway, keep this
 * portion in mind as well.
 */

int
mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval, dtype;
	struct mrsas_tbolt_pd_info *pds = NULL;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
	    tgt, lun));

	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 1,
			    MRSAS_EVT_UNCONFIG_TGT, 0);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas: DELETING STALE ENTRY rval = %d "
			    "tgt id = %d", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	pds = kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
	mrsas_tbolt_get_pd_info(instance, pds, tgt);
	dtype = pds->scsiDevType;
	/* Check for Disk */
	if (dtype == DTYPE_DIRECT) {
		if (LE_16(pds->fwState) != PD_SYSTEM) {
			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
			return (NDI_FAILURE);
		}
		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
		sd->sd_address.a_hba_tran = instance->tran;
		sd->sd_address.a_target = (uint16_t)tgt;
		sd->sd_address.a_lun = (uint8_t)lun;

		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
			rval = mrsas_config_scsi_device(instance, sd, ldip);
			dev_err(instance->dip, CE_CONT,
			    "?Phys. device found: tgt %d dtype %d: %s\n",
			    tgt, dtype, sd->sd_inq->inq_vid);
		} else {
			rval = NDI_FAILURE;
			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device not "
			    "found; scsi_hba_probe failed: tgt %d dtype %d: "
			    "%s", tgt, dtype, sd->sd_inq->inq_vid));
		}

		/* sd_unprobe is a no-op now, so free the buffer manually */
		if (sd->sd_inq) {
			kmem_free(sd->sd_inq, SUN_INQSIZE);
			sd->sd_inq = (struct scsi_inquiry *)NULL;
		}
		kmem_free(sd, sizeof (struct scsi_device));
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "?Device not supported: tgt %d lun %d dtype %d",
		    tgt, lun, dtype));
		rval = NDI_FAILURE;
	}

	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
	    rval));
	return (rval);
}

static void
mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
    struct mrsas_tbolt_pd_info *pds, int tgt)
{
	struct mrsas_cmd	*cmd;
	struct mrsas_dcmd_frame	*dcmd;
	dma_obj_t		dcmd_dma_obj;

	ASSERT(instance->tbolt || instance->skinny);

	if (instance->tbolt)
		cmd = get_raid_msg_pkt(instance);
	else
		cmd = mrsas_get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1,
		    (CE_WARN, "Failed to get a cmd for get pd info"));
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;
	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
	    DDI_STRUCTURE_LE_ACC);
	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
	bzero(dcmd->mbox.b, 12);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_PD_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    dcmd_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->tbolt)
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);

	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
	    DDI_DEV_AUTOINCR);
	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);

	if (instance->tbolt)
		return_raid_msg_pkt(instance, cmd);
	else
		mrsas_return_mfi_pkt(instance, cmd);
}
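
/*
 * Illustrative sketch only (not part of the original driver): a small
 * predicate over mrsas_tbolt_get_pd_info(), mirroring the SYSTEM-PD
 * check that mrsas_tbolt_config_pd() performs before probing a disk.
 * The helper name is hypothetical.
 */
static boolean_t
mrsas_tbolt_pd_is_system(struct mrsas_instance *instance, uint16_t tgt)
{
	struct mrsas_tbolt_pd_info *pds;
	boolean_t is_system;

	pds = kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
	mrsas_tbolt_get_pd_info(instance, pds, tgt);
	is_system = (LE_16(pds->fwState) == PD_SYSTEM);
	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));

	return (is_system);
}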