/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the implementation of driver buffer management
 * and related helper functions
 */
#include <oce_impl.h>

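/*
 * Default DMA attributes for the driver's buffer allocations: full 64-bit
 * address range, OCE_DMA_ALIGNMENT alignment and a single cookie
 * (scatter/gather list length of 1), so every buffer binds to one
 * physically contiguous DMA segment.
 */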
static ddi_dma_attr_t oce_dma_buf_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x00000000FFFFFFFFull,	/* dma counter max */
	OCE_DMA_ALIGNMENT,	/* alignment */
	0x00000FFF,		/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	1,			/* scatter/gather list length */
	0x00000001,		/* granularity */
	0			/* DMA flags */
};

static ddi_device_acc_attr_t oce_dma_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};


/*
 * function to allocate a dma buffer and establish its va-pa mapping
 *
 * dev - software handle to device
 * size - size of the memory to map
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING
 *
 * return pointer to an oce_dma_buf_t structure describing the mapping
 *      NULL => failure
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, uint32_t flags)
{
	oce_dma_buf_t  *dbuf;
	ddi_dma_cookie_t cookie;
	uint32_t count;
	size_t actual_len;
	int ret = 0;

	ASSERT(size > 0);

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL) {
		return (NULL);
	}

	/* allocate dma handle */
	ret = ddi_dma_alloc_handle(dev->dip, &oce_dma_buf_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto alloc_fail;
	}
	/* allocate the DMA-able memory */
	ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &actual_len, &dbuf->acc_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto alloc_fail;
	}

	/* bind handle */
	ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, actual_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto alloc_fail;
	}
	bzero(dbuf->base, actual_len);
	dbuf->addr = cookie.dmac_laddress;
	dbuf->size = actual_len;
	/* usable length */
	dbuf->len  = size;
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);
alloc_fail:
	oce_free_dma_buffer(dev, dbuf);
	return (NULL);
} /* oce_alloc_dma_buffer */
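
/*
 * Illustrative usage sketch (not driver code; the 4 KB size and the caller
 * context are assumptions for illustration only):
 *
 *	oce_dma_buf_t *dbuf;
 *
 *	dbuf = oce_alloc_dma_buffer(dev, 4096, DDI_DMA_CONSISTENT);
 *	if (dbuf == NULL) {
 *		return (DDI_FAILURE);
 *	}
 *	... dbuf->base is the kernel virtual address and dbuf->addr the
 *	device-visible DMA address of the same buffer ...
 *	oce_free_dma_buffer(dev, dbuf);
 */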

/*
 * function to delete a dma buffer
 *
 * dev - software handle to device
 * dbuf - dma buffer to delete
 *
 * return none
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL) {
		return;
	}
	if (dbuf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	}
	if (dbuf->acc_handle != NULL) {
		ddi_dma_mem_free(&dbuf->acc_handle);
	}
	if (dbuf->dma_handle != NULL) {
		ddi_dma_free_handle(&dbuf->dma_handle);
	}
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
} /* oce_free_dma_buffer */
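
/*
 * Note: the individual NULL checks above make this function safe to call
 * on a partially constructed buffer, which is how the alloc_fail path in
 * oce_alloc_dma_buffer() releases whatever was set up before the failure.
 */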

/*
 * function to create a ring buffer
 *
 * dev - software handle to the device
 * num_items - number of items in the ring
 * item_size - size of an individual item in the ring
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING for ring memory
 *
 * return pointer to a ring_buffer structure, NULL on failure
 */
oce_ring_buffer_t *
create_ring_buffer(struct oce_dev *dev,
    uint32_t num_items, uint32_t item_size, uint32_t flags)
{
	oce_ring_buffer_t *ring;
	uint32_t size;

	/* allocate the ring buffer */
	ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_NOSLEEP);
	if (ring == NULL) {
		return (NULL);
	}

	/* get the dbuf defining the ring */
	size = num_items * item_size;
	ring->dbuf = oce_alloc_dma_buffer(dev, size, flags);
	if (ring->dbuf == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Ring buffer allocation failed");
		goto dbuf_fail;
	}

	/* fill the rest of the ring */
	ring->num_items = num_items;
	ring->item_size = item_size;
	ring->num_used  = 0;
	return (ring);

dbuf_fail:
	kmem_free(ring, sizeof (oce_ring_buffer_t));
	return (NULL);
} /* create_ring_buffer */
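
/*
 * Illustrative usage sketch (not driver code; the 256-entry count and the
 * entry_size value are assumptions for illustration only):
 *
 *	oce_ring_buffer_t *ring;
 *
 *	ring = create_ring_buffer(dev, 256, entry_size, DDI_DMA_CONSISTENT);
 *	if (ring == NULL) {
 *		return (DDI_FAILURE);
 *	}
 *	... ring->dbuf->base is the kernel VA of the ring memory and
 *	ring->dbuf->addr the DMA address typically handed to the hardware ...
 *	destroy_ring_buffer(dev, ring);
 */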

/*
 * function to destroy a ring buffer
 *
 * dev - software handle to the device
 * ring - the ring buffer to delete
 *
 * return none
 */
void
destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring)
{
	ASSERT(dev != NULL);
	ASSERT(ring != NULL);

	/* free the dbuf associated with the ring */
	oce_free_dma_buffer(dev, ring->dbuf);
	ring->dbuf = NULL;

	/* free the ring itself */
	kmem_free(ring, sizeof (oce_ring_buffer_t));
} /* destroy_ring_buffer */
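
/*
 * Note: this function frees the ring's DMA memory; callers are expected to
 * ensure the device no longer accesses that memory before the ring is
 * destroyed, as no synchronization is performed here.
 */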


/*
 * function to set the FMA flags in the DMA and access attributes
 *
 * fm_caps - FM capability flags
 *
 * return none
 */
void
oce_set_dma_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	if (DDI_FM_ACC_ERR_CAP(fm_caps)) {
		oce_dma_buf_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		oce_dma_buf_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
		oce_dma_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		oce_dma_buf_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
} /* oce_set_dma_fma_flags */
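
/*
 * Illustrative usage sketch (not driver code; dev->fm_caps is an assumed
 * field name standing in for wherever the driver stores the FM capabilities
 * it registered with ddi_fm_init() during attach):
 *
 *	oce_set_dma_fma_flags(dev->fm_caps);
 */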