/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2017, Joyent, Inc.
 */

#include <sys/scsi/adapters/smrt/smrt.h>

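/*
 * DMA attributes for command block allocations.  Each allocation must fit in
 * a single cookie (dma_attr_sgllen is 1) of physically contiguous memory,
 * aligned on a 32-byte boundary and addressable with a 32-bit physical
 * address.
 */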
static ddi_dma_attr_t smrt_command_dma_attr = {
	.dma_attr_version =		DMA_ATTR_V0,
	.dma_attr_addr_lo =		0x00000000,
	.dma_attr_addr_hi =		0xFFFFFFFF,
	.dma_attr_count_max =		0x00FFFFFF,
	.dma_attr_align =		0x20,
	.dma_attr_burstsizes =		0x20,
	.dma_attr_minxfer =		DMA_UNIT_8,
	.dma_attr_maxxfer =		0xFFFFFFFF,
	.dma_attr_seg =			0x0000FFFF,
	.dma_attr_sgllen =		1,
	.dma_attr_granular =		512,
	.dma_attr_flags =		0
};

/*
 * These device access attributes are for command block allocation, where we do
 * not use any of the structured byte swapping facilities.
 */
static ddi_device_acc_attr_t smrt_command_dev_attr = {
	.devacc_attr_version =		DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags =	DDI_NEVERSWAP_ACC,
	.devacc_attr_dataorder =	DDI_STRICTORDER_ACC,
	.devacc_attr_access =		0
};


static void smrt_contig_free(smrt_dma_t *);

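/*
 * Validate that a command type is one of the known values, returning it
 * unchanged so that it may be used directly in an assignment.  An unknown
 * type is a programmer error and results in a panic.
 */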
static int
smrt_check_command_type(smrt_command_type_t type)
{
	/*
	 * Note that we leave out the default case in order to utilise
	 * compiler warnings about missed enum values.
	 */
	switch (type) {
	case SMRT_CMDTYPE_ABORTQ:
	case SMRT_CMDTYPE_SCSA:
	case SMRT_CMDTYPE_INTERNAL:
	case SMRT_CMDTYPE_PREINIT:
	case SMRT_CMDTYPE_EVENT:
		return (type);
	}

	panic("unexpected command type");
	/* LINTED: E_FUNC_NO_RET_VAL */
}

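/*
 * Allocate a single physically contiguous, DMA-mapped region of at least "sz"
 * bytes, returning both the kernel virtual address and the 32-bit physical
 * address of the region.  Progress is recorded in the tracking object so that
 * smrt_contig_free() can unwind a partial allocation on failure.
 */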
static int
smrt_contig_alloc(smrt_t *smrt, smrt_dma_t *smdma, size_t sz, int kmflags,
    void **vap, uint32_t *pap)
{
	caddr_t va;
	int rv;
	dev_info_t *dip = smrt->smrt_dip;
	int (*dma_wait)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP :
	    DDI_DMA_DONTWAIT;

	VERIFY(kmflags == KM_SLEEP || kmflags == KM_NOSLEEP);

	/*
	 * Ensure we don't try to allocate a second time using the same
	 * tracking object.
	 */
	VERIFY0(smdma->smdma_level);

	if ((rv = ddi_dma_alloc_handle(dip, &smrt_command_dma_attr,
	    dma_wait, NULL, &smdma->smdma_dma_handle)) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "DMA handle allocation failed (%x)",
		    rv);
		goto fail;
	}
	smdma->smdma_level |= SMRT_DMALEVEL_HANDLE_ALLOC;

	if ((rv = ddi_dma_mem_alloc(smdma->smdma_dma_handle, sz,
	    &smrt_command_dev_attr, DDI_DMA_CONSISTENT, dma_wait, NULL,
	    &va, &smdma->smdma_real_size, &smdma->smdma_acc_handle)) !=
	    DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "DMA memory allocation failed (%x)", rv);
		goto fail;
	}
	smdma->smdma_level |= SMRT_DMALEVEL_MEMORY_ALLOC;

	if ((rv = ddi_dma_addr_bind_handle(smdma->smdma_dma_handle,
	    NULL, va, smdma->smdma_real_size,
	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, dma_wait, NULL,
	    smdma->smdma_dma_cookies, &smdma->smdma_dma_ncookies)) !=
	    DDI_DMA_MAPPED) {
		dev_err(dip, CE_WARN, "DMA handle bind failed (%x)", rv);
		goto fail;
	}
	smdma->smdma_level |= SMRT_DMALEVEL_HANDLE_BOUND;

	VERIFY3U(smdma->smdma_dma_ncookies, ==, 1);
	*pap = smdma->smdma_dma_cookies[0].dmac_address;
	*vap = (void *)va;
	return (DDI_SUCCESS);

fail:
	*vap = NULL;
	*pap = 0;
	smrt_contig_free(smdma);
	return (DDI_FAILURE);
}

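/*
 * Tear down a contiguous DMA allocation in the reverse order of allocation,
 * using the level flags recorded by smrt_contig_alloc() to determine how far
 * the allocation progressed.
 */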
static void
smrt_contig_free(smrt_dma_t *smdma)
{
	if (smdma->smdma_level & SMRT_DMALEVEL_HANDLE_BOUND) {
		VERIFY3U(ddi_dma_unbind_handle(smdma->smdma_dma_handle), ==,
		    DDI_SUCCESS);

		smdma->smdma_level &= ~SMRT_DMALEVEL_HANDLE_BOUND;
	}

	if (smdma->smdma_level & SMRT_DMALEVEL_MEMORY_ALLOC) {
		ddi_dma_mem_free(&smdma->smdma_acc_handle);

		smdma->smdma_level &= ~SMRT_DMALEVEL_MEMORY_ALLOC;
	}

	if (smdma->smdma_level & SMRT_DMALEVEL_HANDLE_ALLOC) {
		ddi_dma_free_handle(&smdma->smdma_dma_handle);

		smdma->smdma_level &= ~SMRT_DMALEVEL_HANDLE_ALLOC;
	}

	VERIFY(smdma->smdma_level == 0);
	bzero(smdma, sizeof (*smdma));
}

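/*
 * Common allocation path for all command types: allocate the tracking
 * structure, along with a single contiguous DMA region that holds both the
 * command block and its error information block.
 */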
static smrt_command_t *
smrt_command_alloc_impl(smrt_t *smrt, smrt_command_type_t type, int kmflags)
{
	smrt_command_t *smcm;

	VERIFY(kmflags == KM_SLEEP || kmflags == KM_NOSLEEP);

	if ((smcm = kmem_zalloc(sizeof (*smcm), kmflags)) == NULL) {
		return (NULL);
	}

	smcm->smcm_ctlr = smrt;
	smcm->smcm_type = smrt_check_command_type(type);

	/*
	 * Allocate a single contiguous chunk of memory for the command block
	 * (smcm_va_cmd) and the error information block (smcm_va_err).  The
	 * physical address of each block should be 32-byte aligned.
	 */
	size_t contig_size = 0;
	contig_size += P2ROUNDUP_TYPED(sizeof (CommandList_t), 32, size_t);

	size_t errorinfo_offset = contig_size;
	contig_size += P2ROUNDUP_TYPED(sizeof (ErrorInfo_t), 32, size_t);

	if (smrt_contig_alloc(smrt, &smcm->smcm_contig, contig_size,
	    kmflags, (void **)&smcm->smcm_va_cmd, &smcm->smcm_pa_cmd) !=
	    DDI_SUCCESS) {
		kmem_free(smcm, sizeof (*smcm));
		return (NULL);
	}

	smcm->smcm_va_err = (void *)((caddr_t)smcm->smcm_va_cmd +
	    errorinfo_offset);
	smcm->smcm_pa_err = smcm->smcm_pa_cmd + errorinfo_offset;

	/*
	 * Ensure we asked for, and received, the correct physical alignment:
	 */
	VERIFY0(smcm->smcm_pa_cmd & 0x1f);
	VERIFY0(smcm->smcm_pa_err & 0x1f);

	/*
	 * Zero the command memory and set up the error information
	 * descriptor.
	 */
	bzero(smcm->smcm_va_cmd, contig_size);
	smcm->smcm_va_cmd->ErrDesc.Addr = smcm->smcm_pa_err;
	smcm->smcm_va_cmd->ErrDesc.Len = sizeof (ErrorInfo_t);

	return (smcm);
}

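/*
 * Allocate a command for use during early controller initialisation, before
 * the regular command tracking infrastructure is available, and attach an
 * internal data buffer of the requested size.
 */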
smrt_command_t *
smrt_command_alloc_preinit(smrt_t *smrt, size_t datasize, int kmflags)
{
	smrt_command_t *smcm;

	if ((smcm = smrt_command_alloc_impl(smrt, SMRT_CMDTYPE_PREINIT,
	    kmflags)) == NULL) {
		return (NULL);
	}

	/*
	 * Note that most driver infrastructure has not been initialised at
	 * this time.  All commands are submitted to the controller serially,
	 * using a pre-specified tag, and are not attached to the command
	 * tracking list.
	 */
	smcm->smcm_tag = SMRT_PRE_TAG_NUMBER;
	smcm->smcm_va_cmd->Header.Tag.tag_value = SMRT_PRE_TAG_NUMBER;

	if (smrt_command_attach_internal(smrt, smcm, datasize, kmflags) != 0) {
		smrt_command_free(smcm);
		return (NULL);
	}

	return (smcm);
}

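/*
 * Allocate a command of the nominated type and insert it into the
 * per-controller list of command allocations.  Pre-init commands must instead
 * be allocated with smrt_command_alloc_preinit().
 */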
smrt_command_t *
smrt_command_alloc(smrt_t *smrt, smrt_command_type_t type, int kmflags)
{
	smrt_command_t *smcm;

	VERIFY(type != SMRT_CMDTYPE_PREINIT);

	if ((smcm = smrt_command_alloc_impl(smrt, type, kmflags)) == NULL) {
		return (NULL);
	}

	/*
	 * Insert into the per-controller command list.
	 */
	mutex_enter(&smrt->smrt_mutex);
	list_insert_tail(&smrt->smrt_commands, smcm);
	mutex_exit(&smrt->smrt_mutex);

	return (smcm);
}

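/*
 * Attach a driver-internal data buffer of "len" bytes to a command, mapping
 * it for DMA and describing it to the controller as a single scatter-gather
 * entry.
 */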
int
smrt_command_attach_internal(smrt_t *smrt, smrt_command_t *smcm, size_t len,
    int kmflags)
{
	smrt_command_internal_t *smcmi;

	VERIFY(kmflags == KM_SLEEP || kmflags == KM_NOSLEEP);
	VERIFY3U(len, <=, UINT32_MAX);

	if ((smcmi = kmem_zalloc(sizeof (*smcmi), kmflags)) == NULL) {
		return (ENOMEM);
	}

	if (smrt_contig_alloc(smrt, &smcmi->smcmi_contig, len, kmflags,
	    &smcmi->smcmi_va, &smcmi->smcmi_pa) != DDI_SUCCESS) {
		kmem_free(smcmi, sizeof (*smcmi));
		return (ENOMEM);
	}

	/*
	 * Record the length of the internal buffer, then zero its contents.
	 */
	smcmi->smcmi_len = len;
	bzero(smcmi->smcmi_va, smcmi->smcmi_len);

	smcm->smcm_internal = smcmi;

	smcm->smcm_va_cmd->SG[0].Addr = smcmi->smcmi_pa;
	smcm->smcm_va_cmd->SG[0].Len = (uint32_t)len;
	smcm->smcm_va_cmd->Header.SGList = 1;
	smcm->smcm_va_cmd->Header.SGTotal = 1;

	return (0);
}

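/*
 * Reset a previously completed command so that it may be submitted again.
 * The command must not be inflight, nor may it be waiting on the abort or
 * finish queues.
 */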
void
smrt_command_reuse(smrt_command_t *smcm)
{
	smrt_t *smrt = smcm->smcm_ctlr;

	mutex_enter(&smrt->smrt_mutex);

	/*
	 * Make sure the command is not currently inflight, then
	 * reset the command status.
	 */
	VERIFY(!(smcm->smcm_status & SMRT_CMD_STATUS_INFLIGHT));
	smcm->smcm_status = SMRT_CMD_STATUS_REUSED;

	/*
	 * Ensure we are not trying to reuse a command that is in the finish or
	 * abort queue.
	 */
	VERIFY(!list_link_active(&smcm->smcm_link_abort));
	VERIFY(!list_link_active(&smcm->smcm_link_finish));

	/*
	 * Clear the previous tag value.
	 */
	smcm->smcm_tag = 0;
	smcm->smcm_va_cmd->Header.Tag.tag_value = 0;

	mutex_exit(&smrt->smrt_mutex);
}

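/*
 * Free a command and any attached internal data buffer.  The command must not
 * be inflight or queued; commands other than pre-init commands are also
 * removed from the per-controller command list.
 */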
void
smrt_command_free(smrt_command_t *smcm)
{
	smrt_t *smrt = smcm->smcm_ctlr;

	/*
	 * Ensure the object we are about to free is not currently in the
	 * inflight AVL.
	 */
	VERIFY(!(smcm->smcm_status & SMRT_CMD_STATUS_INFLIGHT));

	if (smcm->smcm_internal != NULL) {
		smrt_command_internal_t *smcmi = smcm->smcm_internal;

		smrt_contig_free(&smcmi->smcmi_contig);
		kmem_free(smcmi, sizeof (*smcmi));
	}

	smrt_contig_free(&smcm->smcm_contig);

	if (smcm->smcm_type != SMRT_CMDTYPE_PREINIT) {
		mutex_enter(&smrt->smrt_mutex);

		/*
		 * Ensure we are not trying to free a command that is in the
		 * finish or abort queue.
		 */
		VERIFY(!list_link_active(&smcm->smcm_link_abort));
		VERIFY(!list_link_active(&smcm->smcm_link_finish));

		list_remove(&smrt->smrt_commands, smcm);

		mutex_exit(&smrt->smrt_mutex);
	}

	kmem_free(smcm, sizeof (*smcm));
}

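/*
 * Look up an inflight command by its tag value.  The caller must hold the
 * per-controller mutex.  Returns NULL if no command with that tag is
 * currently inflight.
 */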
smrt_command_t *
smrt_lookup_inflight(smrt_t *smrt, uint32_t tag)
{
	smrt_command_t srch;

	VERIFY(MUTEX_HELD(&smrt->smrt_mutex));

	bzero(&srch, sizeof (srch));
	srch.smcm_tag = tag;

	return (avl_find(&smrt->smrt_inflight, &srch, NULL));
}