/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sysmacros.h>

#include <vm/hat.h>
#include <vm/as.h>

#include <sys/ioat.h>


extern void *ioat_statep;
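/* 64-bit ptob(): convert a page number (here, a pfn) to a byte address */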
#define	ptob64(x)	(((uint64_t)(x)) << PAGESHIFT)

static int ioat_ioctl_rdreg(ioat_state_t *state, void *arg, int mode);
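/* the register-write and self-test ioctls are only built into DEBUG kernels */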
#ifdef	DEBUG
static int ioat_ioctl_wrreg(ioat_state_t *state, void *arg, int mode);
static int ioat_ioctl_test(ioat_state_t *state, void *arg, int mode);
#endif

/*
 * ioat_ioctl()
 */
/*ARGSUSED*/
int
ioat_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred, int *rval)
{
	ioat_state_t *state;
	int instance;
	int e;


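	/* these ioctls poke device registers directly, so require privileges */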
	e = drv_priv(cred);
	if (e != 0) {
		return (EPERM);
	}
	instance = getminor(dev);
	if (instance == -1) {
		return (EBADF);
	}
	state = ddi_get_soft_state(ioat_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	switch (cmd) {
	case IOAT_IOCTL_READ_REG:
		e = ioat_ioctl_rdreg(state, (void *)arg, mode);
		break;
#ifdef	DEBUG
	case IOAT_IOCTL_WRITE_REG:
		e = ioat_ioctl_wrreg(state, (void *)arg, mode);
		break;
	case IOAT_IOCTL_TEST:
		e = ioat_ioctl_test(state, (void *)arg, mode);
		break;
#endif

	default:
		e = ENXIO;
	}

	return (e);
}


/*
 * ioat_ioctl_rdreg()
 */
static int
ioat_ioctl_rdreg(ioat_state_t *state, void *arg, int mode)
{
	ioat_ioctl_rdreg_t rdreg;
	int e;


	e = ddi_copyin(arg, &rdreg, sizeof (ioat_ioctl_rdreg_t), mode);
	if (e != 0) {
		return (EFAULT);
	}

	/*
	 * Read a device register.  'size' is the access width in bits and
	 * 'addr' is the offset into the MMIO register space.
	 */
	switch (rdreg.size) {
	case 8:
		rdreg.data = (uint64_t)ddi_get8(state->is_reg_handle,
		    (uint8_t *)&state->is_genregs[rdreg.addr]);
		break;
	case 16:
		rdreg.data = (uint64_t)ddi_get16(state->is_reg_handle,
		    (uint16_t *)&state->is_genregs[rdreg.addr]);
		break;
	case 32:
		rdreg.data = (uint64_t)ddi_get32(state->is_reg_handle,
		    (uint32_t *)&state->is_genregs[rdreg.addr]);
		break;
	case 64:
		rdreg.data = (uint64_t)ddi_get64(state->is_reg_handle,
		    (uint64_t *)&state->is_genregs[rdreg.addr]);
		break;
	default:
		return (EFAULT);
	}

	e = ddi_copyout(&rdreg, arg, sizeof (ioat_ioctl_rdreg_t), mode);
	if (e != 0) {
		return (EFAULT);
	}

	return (0);
}
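
/*
 * For reference, a rough sketch of how a privileged userland caller might
 * drive IOAT_IOCTL_READ_REG.  The struct and field names match the usage
 * above (sys/ioat.h); the device path and error handling are assumptions
 * for the sketch, not part of this driver.
 *
 *	int fd = open("/dev/ioat", O_RDONLY);	(hypothetical path)
 *	ioat_ioctl_rdreg_t rdreg;
 *
 *	rdreg.size = 32;	(access width in bits: 8, 16, 32, or 64)
 *	rdreg.addr = 0x0;	(offset into the MMIO register space)
 *	if (fd != -1 && ioctl(fd, IOAT_IOCTL_READ_REG, &rdreg) == 0)
 *		(void) printf("reg = 0x%llx\n",
 *		    (unsigned long long)rdreg.data);
 */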


#ifdef	DEBUG
/*
 * ioat_ioctl_wrreg()
 */
static int
ioat_ioctl_wrreg(ioat_state_t *state, void *arg, int mode)
{
	ioat_ioctl_wrreg_t wrreg;
	int e;


	e = ddi_copyin(arg, &wrreg, sizeof (ioat_ioctl_wrreg_t), mode);
	if (e != 0) {
		return (EFAULT);
	}

	/*
	 * Write a device register.  'size' is the access width in bits and
	 * 'addr' is the offset into the MMIO register space.
	 */
	switch (wrreg.size) {
	case 8:
		ddi_put8(state->is_reg_handle,
		    (uint8_t *)&state->is_genregs[wrreg.addr],
		    (uint8_t)wrreg.data);
		break;
	case 16:
		ddi_put16(state->is_reg_handle,
		    (uint16_t *)&state->is_genregs[wrreg.addr],
		    (uint16_t)wrreg.data);
		break;
	case 32:
		ddi_put32(state->is_reg_handle,
		    (uint32_t *)&state->is_genregs[wrreg.addr],
		    (uint32_t)wrreg.data);
		break;
	case 64:
		ddi_put64(state->is_reg_handle,
		    (uint64_t *)&state->is_genregs[wrreg.addr],
		    (uint64_t)wrreg.data);
		break;
	default:
		return (EFAULT);
	}

	return (0);
}


/*
 * ioat_ioctl_test()
 */
/*ARGSUSED*/
static int
ioat_ioctl_test(ioat_state_t *state, void *arg, int mode)
{
	dcopy_handle_t channel;
	dcopy_cmd_t cmd;
	uint8_t *source;
	uint_t buf_size;
	uint_t poll_cnt;
	uint8_t *dest;
	uint8_t *buf;
	int flags;
	int i;
	int e;


	/*
	 * Allocate two page-aligned 4K buffers; over-allocate by a page so
	 * source can be rounded up to a page boundary.
	 */
	buf_size = 0x1000;
	buf = kmem_zalloc((buf_size * 2) + 0x1000, KM_SLEEP);
	source = (uint8_t *)(((uintptr_t)buf + PAGEOFFSET) & PAGEMASK);
	dest = source + buf_size;

	/* Init source buffer */
	for (i = 0; i < buf_size; i++) {
		source[i] = (uint8_t)(i & 0xFF);
	}

	/* allocate a DMA channel */
	e = dcopy_alloc(DCOPY_SLEEP, &channel);
	if (e != DCOPY_SUCCESS) {
		cmn_err(CE_CONT, "dcopy_alloc() failed\n");
		goto testfail_alloc;
	}

	/*
	 * Post 32 DMA copies from dest to dest.  These will complete in order
	 * so they won't stomp on each other.  We don't care about the data
	 * right now, which is why we go dest to dest.
	 */
	flags = DCOPY_SLEEP;
	for (i = 0; i < 32; i++) {
		/*
		 * If this is the second command, link the commands from here
		 * on out.  We only want to keep track of the last command: we
		 * will poll on the last command completing (which implies
		 * that the earlier commands completed), and if any of the
		 * previous commands fail, so will the last one.  Linking the
		 * commands also lets us call free on just the last command;
		 * free will release the entire chain of commands.
		 */
		if (i == 1) {
			flags |= DCOPY_ALLOC_LINK;
		}
		e = dcopy_cmd_alloc(channel, flags, &cmd);
		if (e != DCOPY_SUCCESS) {
			cmn_err(CE_CONT, "dcopy_cmd_alloc() failed\n");
			goto testfail_alloc;
		}

		ASSERT(cmd->dp_version == DCOPY_CMD_V0);
		cmd->dp_cmd = DCOPY_CMD_COPY;
		cmd->dp_flags = DCOPY_CMD_NOFLAGS;

		/* do a bunch of dest-to-dest DMAs */
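		/*
		 * dcopy takes physical addresses: the page frame number
		 * shifted up by PAGESHIFT plus the offset within the page.
		 */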
		cmd->dp.copy.cc_source = ptob64(hat_getpfnum(kas.a_hat,
		    (caddr_t)source)) + ((uintptr_t)dest & PAGEOFFSET);
		cmd->dp.copy.cc_dest = ptob64(hat_getpfnum(kas.a_hat,
		    (caddr_t)dest)) + ((uintptr_t)dest & PAGEOFFSET);
		cmd->dp.copy.cc_size = PAGESIZE;

		e = dcopy_cmd_post(cmd);
		if (e != DCOPY_SUCCESS) {
			cmn_err(CE_CONT, "dcopy_post() failed\n");
			goto testfail_post;
		}
	}

	e = dcopy_cmd_alloc(channel, flags, &cmd);
	if (e != DCOPY_SUCCESS) {
		cmn_err(CE_CONT, "dcopy_cmd_alloc() failed\n");
		goto testfail_alloc;
	}

	/* now queue up the DMA whose status and data we will check */
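	/* DCOPY_CMD_INTR: interrupt when this last command completes */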
	cmd->dp_cmd = DCOPY_CMD_COPY;
	cmd->dp_flags = DCOPY_CMD_INTR;
	cmd->dp.copy.cc_source = ptob64(hat_getpfnum(kas.a_hat,
	    (caddr_t)source)) + ((uintptr_t)source & PAGEOFFSET);
	cmd->dp.copy.cc_dest = ptob64(hat_getpfnum(kas.a_hat,
	    (caddr_t)dest)) + ((uintptr_t)dest & PAGEOFFSET);
	cmd->dp.copy.cc_size = PAGESIZE;
	e = dcopy_cmd_post(cmd);
	if (e != DCOPY_SUCCESS) {
		cmn_err(CE_CONT, "dcopy_post() failed\n");
		goto testfail_post;
	}

	/* check the status of the last command */
	poll_cnt = 0;
	flags = DCOPY_POLL_NOFLAGS;
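	/* spin on completion for a while; after 16 polls, block instead */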
	while ((e = dcopy_cmd_poll(cmd, flags)) == DCOPY_PENDING) {
		poll_cnt++;
		if (poll_cnt >= 16) {
			flags |= DCOPY_POLL_BLOCK;
		}
	}
	if (e != DCOPY_COMPLETED) {
		cmn_err(CE_CONT, "dcopy_poll() failed\n");
		goto testfail_poll;
	}

	/* since the cmds are linked we only need to pass in the last cmd */
	dcopy_cmd_free(&cmd);
	dcopy_free(&channel);

	/* verify the data */
	for (i = 0; i < PAGESIZE; i++) {
		if (dest[i] != (uint8_t)(i & 0xFF)) {
			cmn_err(CE_CONT,
			    "dcopy_data_compare() failed, %p[%d]: %x, %x\n",
			    (void *)dest, i, dest[i], i & 0xFF);
			/* cmd and channel are already freed; don't leak buf */
			kmem_free(buf, (buf_size * 2) + 0x1000);
			return (-1);
		}
	}

	kmem_free(buf, (buf_size * 2) + 0x1000);

	return (0);

testfail_data_compare:
testfail_poll:
testfail_post:
	dcopy_cmd_free(&cmd);
	dcopy_free(&channel);
testfail_alloc:
	kmem_free(buf, (buf_size * 2) + 0x1000);

	return (-1);
}
#endif