/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the implementation of the driver's DMA buffer
 * management and related helper functions.
 */
#include <oce_impl.h>

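/*
 * Default DMA and device access attributes for driver-allocated DMA
 * memory.  oce_alloc_dma_buffer() falls back to oce_dma_buf_attr when its
 * caller passes a NULL attribute pointer, and oce_set_dma_fma_flags()
 * adjusts both structures according to the FMA capabilities it is given.
 */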
static ddi_dma_attr_t oce_dma_buf_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x00000000FFFFFFFFull,	/* dma counter max */
	OCE_DMA_ALIGNMENT,	/* alignment */
	0x00000FFF,		/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	1,			/* scatter/gather list length */
	0x00000001,		/* granularity */
	0			/* DMA flags */
};

static ddi_device_acc_attr_t oce_dma_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};


/*
 * function to allocate a DMA buffer and establish its VA-PA mapping
 *
 * dev - software handle to device
 * size - size of the memory to map
 * dma_attr - DMA attributes to use, or NULL for the driver defaults
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING
 *
 * return pointer to an oce_dma_buf_t structure describing the mapping,
 *	NULL on failure
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags)
{
	oce_dma_buf_t  *dbuf;
	ddi_dma_cookie_t cookie;
	uint32_t count;
	size_t actual_len;
	int ret = 0;

	ASSERT(size > 0);
	/* if NULL use default */
	if (dma_attr == NULL) {
		dma_attr = &oce_dma_buf_attr;
	}

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL) {
		return (NULL);
	}

	/* allocate dma handle */
	ret = ddi_dma_alloc_handle(dev->dip, dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto handle_fail;
	}
	/* allocate the DMA-able memory */
	ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &actual_len, &dbuf->acc_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto alloc_fail;
	}

	/* bind handle */
	ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, actual_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto bind_fail;
	}
	bzero(dbuf->base, actual_len);
	dbuf->addr = cookie.dmac_laddress;
	dbuf->size = actual_len;
	/* usable length */
	dbuf->len  = size;
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);

bind_fail:
	ddi_dma_mem_free(&dbuf->acc_handle);
alloc_fail:
	ddi_dma_free_handle(&dbuf->dma_handle);
handle_fail:
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
	return (NULL);
} /* oce_alloc_dma_buffer */
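
/*
 * Illustrative usage sketch (not part of the driver): a caller that needs
 * a consistent, pre-zeroed DMA area would typically pair the allocation
 * with oce_free_dma_buffer().  The "dev" handle and the 4 KB size below
 * are assumptions made purely for the example.
 *
 *	oce_dma_buf_t *dbuf;
 *
 *	dbuf = oce_alloc_dma_buffer(dev, 4096, NULL, DDI_DMA_CONSISTENT);
 *	if (dbuf == NULL)
 *		return (DDI_FAILURE);
 *	... dbuf->base is the kernel VA, dbuf->addr the DMA address ...
 *	oce_free_dma_buffer(dev, dbuf);
 */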

/*
 * function to free a dma buffer
 *
 * dev - software handle to device
 * dbuf - dma buffer object to free
 *
 * return none
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL) {
		return;
	}
	if (dbuf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	}
	if (dbuf->acc_handle != NULL) {
		ddi_dma_mem_free(&dbuf->acc_handle);
	}
	if (dbuf->dma_handle != NULL) {
		ddi_dma_free_handle(&dbuf->dma_handle);
	}
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
} /* oce_free_dma_buffer */

/*
 * function to create a ring buffer
 *
 * dev - software handle to the device
 * num_items - number of items in the ring
 * item_size - size of an individual item in the ring
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING for the ring memory
 *
 * return pointer to an oce_ring_buffer_t structure, NULL on failure
 */
oce_ring_buffer_t *
create_ring_buffer(struct oce_dev *dev,
    uint32_t num_items, uint32_t item_size, uint32_t flags)
{
	oce_ring_buffer_t *ring;
	uint32_t size;

	/* allocate the ring buffer */
	ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_NOSLEEP);
	if (ring == NULL) {
		return (NULL);
	}

	/* get the dbuf defining the ring */
	size = num_items * item_size;
	ring->dbuf = oce_alloc_dma_buffer(dev, size, NULL, flags);
	if (ring->dbuf == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Ring buffer allocation failed");
		goto dbuf_fail;
	}

	/* fill the rest of the ring */
	ring->num_items = num_items;
	ring->item_size = item_size;
	ring->num_used  = 0;
	return (ring);

dbuf_fail:
	kmem_free(ring, sizeof (oce_ring_buffer_t));
	return (NULL);
} /* create_ring_buffer */
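
/*
 * Illustrative usage sketch (not part of the driver): a queue setup path
 * could size a ring by its entry count and entry size, then release it
 * with destroy_ring_buffer() at teardown.  The 256-entry, 16-byte-entry
 * ring below is an assumption made purely for the example.
 *
 *	oce_ring_buffer_t *ring;
 *
 *	ring = create_ring_buffer(dev, 256, 16, DDI_DMA_CONSISTENT);
 *	if (ring == NULL)
 *		return (DDI_FAILURE);
 *	... the ring's DMA memory is at ring->dbuf->base ...
 *	destroy_ring_buffer(dev, ring);
 */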

/*
 * function to destroy a ring buffer
 *
 * dev - software handle to the device
 * ring - the ring buffer to delete
 *
 * return none
 */
void
destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring)
{
	ASSERT(dev != NULL);
	ASSERT(ring != NULL);

	/* free the dbuf associated with the ring */
	oce_free_dma_buffer(dev, ring->dbuf);
	ring->dbuf = NULL;

	/* free the ring itself */
	kmem_free(ring, sizeof (oce_ring_buffer_t));
} /* destroy_ring_buffer */


/*
 * function to set the DMA and device access attributes according to the
 * FMA capabilities
 *
 * fm_caps - FM capability flags
 *
 * return none
 */
void
oce_set_dma_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	oce_dma_buf_accattr.devacc_attr_access = DDI_DEFAULT_ACC;

	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
		oce_dma_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		oce_dma_buf_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
} /* oce_set_dma_fma_flags */
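
/*
 * Illustrative usage sketch (not part of the driver): attach-time code
 * would normally obtain the negotiated FMA capabilities, for example via
 * ddi_fm_init(9F), before calling oce_set_dma_fma_flags() so that later
 * DMA allocations pick up DDI_DMA_FLAGERR when DMA error reporting is
 * enabled.  The "fm_caps", "ibc" and "dip" variables are assumptions made
 * purely for the example.
 *
 *	int fm_caps = DDI_FM_DMACHK_CAPABLE;
 *	ddi_iblock_cookie_t ibc;
 *
 *	ddi_fm_init(dip, &fm_caps, &ibc);
 *	oce_set_dma_fma_flags(fm_caps);
 */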