/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2011-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/queue.h>

#include "t4nex.h"
#include "common/common.h"
#include "common/t4_regs.h"

/* helpers */
static int pci_rw(struct adapter *sc, void *data, int flags, int write);
static int reg_rw(struct adapter *sc, void *data, int flags, int write);
static void reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
    unsigned int end);
static int regdump(struct adapter *sc, void *data, int flags);
static int get_sge_context(struct adapter *sc, void *data, int flags);
static int get_devlog(struct adapter *sc, void *data, int flags);
static int read_card_mem(struct adapter *sc, void *data, int flags);
static int read_tid_tab(struct adapter *sc, void *data, int flags);
static int read_mbox(struct adapter *sc, void *data, int flags);
static int read_cim_la(struct adapter *sc, void *data, int flags);
static int read_cim_qcfg(struct adapter *sc, void *data, int flags);
static int read_edc(struct adapter *sc, void *data, int flags);

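/*
 * Top-level ioctl handler for the nexus driver: decode the T4_IOCTL_*
 * command and hand the user buffer off to the matching helper below.
 * Unrecognized commands are rejected with EINVAL.
 */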
int
t4_ioctl(struct adapter *sc, int cmd, void *data, int mode)
{
	int rc = ENOTSUP;

	switch (cmd) {
	case T4_IOCTL_PCIGET32:
	case T4_IOCTL_PCIPUT32:
		rc = pci_rw(sc, data, mode, cmd == T4_IOCTL_PCIPUT32);
		break;
	case T4_IOCTL_GET32:
	case T4_IOCTL_PUT32:
		rc = reg_rw(sc, data, mode, cmd == T4_IOCTL_PUT32);
		break;
	case T4_IOCTL_REGDUMP:
		rc = regdump(sc, data, mode);
		break;
	case T4_IOCTL_SGE_CONTEXT:
		rc = get_sge_context(sc, data, mode);
		break;
	case T4_IOCTL_DEVLOG:
		rc = get_devlog(sc, data, mode);
		break;
	case T4_IOCTL_GET_MEM:
		rc = read_card_mem(sc, data, mode);
		break;
	case T4_IOCTL_GET_TID_TAB:
		rc = read_tid_tab(sc, data, mode);
		break;
	case T4_IOCTL_GET_MBOX:
		rc = read_mbox(sc, data, mode);
		break;
	case T4_IOCTL_GET_CIM_LA:
		rc = read_cim_la(sc, data, mode);
		break;
	case T4_IOCTL_GET_CIM_QCFG:
		rc = read_cim_qcfg(sc, data, mode);
		break;
	case T4_IOCTL_GET_CIM_IBQ:
		rc = read_cim_ibq(sc, data, mode);
		break;
	case T4_IOCTL_GET_EDC:
		rc = read_edc(sc, data, mode);
		break;
	default:
		return (EINVAL);
	}

	return (rc);
}

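/*
 * T4_IOCTL_PCIGET32/PCIPUT32: read or write a 32-bit PCI config space
 * register.  The caller's t4_reg32_cmd is copied in, the access is
 * performed, and the value is copied back out for reads.
 */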
static int
pci_rw(struct adapter *sc, void *data, int flags, int write)
{
	struct t4_reg32_cmd r;

	if (ddi_copyin(data, &r, sizeof (r), flags) < 0)
		return (EFAULT);

	/* PCI config space address must be 32-bit aligned */
	r.reg &= ~0x3;

	if (write != 0)
		t4_os_pci_write_cfg4(sc, r.reg, r.value);
	else {
		t4_os_pci_read_cfg4(sc, r.reg, &r.value);
		if (ddi_copyout(&r, data, sizeof (r), flags) < 0)
			return (EFAULT);
	}

	return (0);
}

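/*
 * T4_IOCTL_GET32/PUT32: read or write a 32-bit adapter (MMIO) register,
 * using the same t4_reg32_cmd exchange as pci_rw() above.
 */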
static int
reg_rw(struct adapter *sc, void *data, int flags, int write)
{
	struct t4_reg32_cmd r;

	if (ddi_copyin(data, &r, sizeof (r), flags) < 0)
		return (EFAULT);

	/* Register address must be 32-bit aligned */
	r.reg &= ~0x3;

	if (write != 0)
		t4_write_reg(sc, r.reg, r.value);
	else {
		r.value = t4_read_reg(sc, r.reg);
		if (ddi_copyout(&r, data, sizeof (r), flags) < 0)
			return (EFAULT);
	}

	return (0);
}

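/*
 * Read the registers in [start, end] into buf at offset start.  Used by
 * regdump() to capture each of the ranges in reg_ranges[].
 */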
static void
reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	uint32_t *p = (uint32_t *)(buf + start);

	for (/* */; start <= end; start += sizeof (uint32_t))
		*p++ = t4_read_reg(sc, start);
}

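/*
 * T4_IOCTL_REGDUMP: snapshot the register ranges listed in reg_ranges[]
 * into a T4_REGDUMP_SIZE buffer and copy the dump out to the caller.
 */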
static int
regdump(struct adapter *sc, void *data, int flags)
{
	struct t4_regdump r;
	uint8_t *buf;
	int rc = 0, i;
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e240, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e640, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea40, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee40, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f240, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f640, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa40, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe40, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	if (ddi_copyin(data, &r, sizeof (r), flags) < 0)
		return (EFAULT);

	if (r.len > T4_REGDUMP_SIZE)
		r.len = T4_REGDUMP_SIZE;
	else if (r.len < T4_REGDUMP_SIZE)
		return (E2BIG);

	buf = kmem_zalloc(T4_REGDUMP_SIZE, KM_SLEEP);

	r.version = 4 | (sc->params.rev << 10);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);

	if (ddi_copyout(buf, r.data, r.len, flags) < 0)
		rc = EFAULT;

	if (rc == 0 && ddi_copyout(&r, data, sizeof (r), flags) < 0)
		rc = EFAULT;

	kmem_free(buf, T4_REGDUMP_SIZE);

	return (rc);
}

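/*
 * T4_IOCTL_SGE_CONTEXT: read an SGE context (egress, ingress, or free-list
 * manager) for the requested context ID, via the firmware mailbox when the
 * firmware is up and through the backdoor path otherwise.
 */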
static int
get_sge_context(struct adapter *sc, void *data, int flags)
{
	struct t4_sge_context sgec;
	uint32_t buff[SGE_CTXT_SIZE / 4];
	int rc = 0;

	if (ddi_copyin(data, &sgec, sizeof (sgec), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	if (sgec.len < SGE_CTXT_SIZE || sgec.addr > M_CTXTQID) {
		rc = EINVAL;
		goto _exit;
	}

	if ((sgec.mem_id != T4_CTXT_EGRESS) && (sgec.mem_id != T4_CTXT_FLM) &&
	    (sgec.mem_id != T4_CTXT_INGRESS)) {
		rc = EINVAL;
		goto _exit;
	}

	rc = (sc->flags & FW_OK) ?
	    -t4_sge_ctxt_rd(sc, sc->mbox, sgec.addr, sgec.mem_id, buff) :
	    -t4_sge_ctxt_rd_bd(sc, sgec.addr, sgec.mem_id, buff);
	if (rc != 0)
		goto _exit;

	sgec.version = 4 | (sc->params.rev << 10);

	/*
	 * Copy out the context data (only SGE_CTXT_SIZE bytes are valid)
	 * followed by the t4_sge_context itself.
	 */
	rc = ddi_copyout(buff, sgec.data, SGE_CTXT_SIZE, flags);
	if (rc == 0)
		rc = ddi_copyout(&sgec, data, sizeof (sgec), flags);
	/* a failure in either copyout is reported as EFAULT */
	if (rc != 0)
		rc = EFAULT;

_exit:
	return (rc);
}

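/*
 * T4_IOCTL_GET_TID_TAB: report how many TIDs, ATIDs and STIDs are
 * currently in use.
 */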
static int
read_tid_tab(struct adapter *sc, void *data, int flags)
{
	struct t4_tid_info t4tid;
	uint32_t *buf, *b;
	struct tid_info *t = &sc->tids;
	int rc = 0;

	if (ddi_copyin(data, &t4tid, sizeof (t4tid), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	/* the dump is three 32-bit counters */
	if (t4tid.len < 3 * sizeof (uint32_t)) {
		rc = EINVAL;
		goto _exit;
	}

	buf = b = kmem_zalloc(t4tid.len, KM_NOSLEEP);
	if (buf == NULL) {
		rc = ENOMEM;
		goto _exit;
	}

	*b++ = t->tids_in_use;
	*b++ = t->atids_in_use;
	*b = t->stids_in_use;

	if (ddi_copyout(buf, t4tid.data, t4tid.len, flags) < 0)
		rc = EFAULT;

	kmem_free(buf, t4tid.len);

_exit:
	return (rc);
}

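/*
 * T4_IOCTL_GET_MEM: read a range of card memory (EDC0, EDC1 or external
 * memory) through PCIe memory window 2 and copy it out to the caller.
 */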
static int
read_card_mem(struct adapter *sc, void *data, int flags)
{
	struct t4_mem_range mr;
	uint32_t base, size, lo, hi, win, off, remaining, i, n;
	uint32_t *buf, *b;
	int rc = 0;

	if (ddi_copyin(data, &mr, sizeof (mr), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	/* reads are in multiples of 32 bits */
	if (mr.addr & 3 || mr.len & 3 || mr.len == 0) {
		rc = EINVAL;
		goto _exit;
	}

	/*
	 * We don't want to deal with potential holes so we mandate that the
	 * requested region must lie entirely within one of the 3 memories.
	 */
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		base = G_EDRAM0_BASE(hi) << 20;
		size = G_EDRAM0_SIZE(hi) << 20;
		if (size > 0 &&
		    mr.addr >= base && mr.addr < base + size &&
		    mr.addr + mr.len <= base + size)
			goto proceed;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		base = G_EDRAM1_BASE(hi) << 20;
		size = G_EDRAM1_SIZE(hi) << 20;
		if (size > 0 &&
		    mr.addr >= base && mr.addr < base + size &&
		    mr.addr + mr.len <= base + size)
			goto proceed;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		base = G_EXT_MEM_BASE(hi) << 20;
		size = G_EXT_MEM_SIZE(hi) << 20;
		if (size > 0 &&
		    mr.addr >= base && mr.addr < base + size &&
		    mr.addr + mr.len <= base + size)
			goto proceed;
	}
	return (ENXIO);

proceed:
	buf = b = kmem_zalloc(mr.len, KM_NOSLEEP);
	if (buf == NULL) {
		rc = ENOMEM;
		goto _exit;
	}

	/*
	 * Position the PCIe window (we use memwin2) to the 16B aligned area
	 * just at/before the requested region.
	 */
	win = mr.addr & ~0xf;
	off = mr.addr - win;  /* offset of the requested region in the win */
	remaining = mr.len;

	while (remaining) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
		(void) t4_read_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, MEMWIN2_APERTURE - off);

		for (i = 0; i < n; i += 4, remaining -= 4)
			*b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);

		win += MEMWIN2_APERTURE;
		off = 0;
	}

	if (ddi_copyout(buf, mr.data, mr.len, flags) < 0)
		rc = EFAULT;

	kmem_free(buf, mr.len);

_exit:
	return (rc);
}

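/*
 * T4_IOCTL_DEVLOG: copy the firmware device log out to the caller.  If the
 * supplied buffer is too small the required size is passed back along with
 * ENOBUFS so the caller can retry.
 */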
static int
get_devlog(struct adapter *sc, void *data, int flags)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf;
	struct t4_devlog dl;
	int rc = 0;

	if (ddi_copyin(data, &dl, sizeof (dl), flags) < 0) {
		rc = EFAULT;
		goto done;
	}

	if (dparams->start == 0) {
		rc = ENXIO;
		goto done;
	}

	if (dl.len < dparams->size) {
		dl.len = dparams->size;
		rc = ddi_copyout(&dl, data, sizeof (dl), flags);
		/*
		 * The caller's buffer is too small; the required size has
		 * been passed back above.  A successful copyout (rc == 0)
		 * becomes ENOBUFS so the caller knows to retry with a
		 * larger buffer; a failed copyout becomes EFAULT.
		 */
		rc = (rc == 0) ? ENOBUFS : EFAULT;
		goto done;
	}

	buf = kmem_zalloc(dparams->size, KM_NOSLEEP);
	if (buf == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
	    (void *)buf);
	if (rc != 0)
		goto done1;

	/* Copyout device log buffer and then carrier buffer */
	if (ddi_copyout(buf, dl.data, dl.len, flags) < 0)
		rc = EFAULT;
	else if (ddi_copyout(&dl, data, sizeof (dl), flags) < 0)
		rc = EFAULT;

done1:
	kmem_free(buf, dparams->size);

done:
	return (rc);
}

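/*
 * T4_IOCTL_GET_CIM_QCFG: return the CIM queue configuration (base, size,
 * thresholds) along with the queue pointer registers starting at
 * A_UP_IBQ_0_RDADDR.
 */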
static int
read_cim_qcfg(struct adapter *sc, void *data, int flags)
{
	struct t4_cim_qcfg t4cimqcfg;
	int rc = 0;

	if (ddi_copyin(data, &t4cimqcfg, sizeof (t4cimqcfg), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	rc = -t4_cim_read(sc, A_UP_IBQ_0_RDADDR, ARRAY_SIZE(t4cimqcfg.stat),
	    t4cimqcfg.stat);

	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, t4cimqcfg.base, t4cimqcfg.size, t4cimqcfg.thres);

	if (ddi_copyout(&t4cimqcfg, data, sizeof (t4cimqcfg), flags) < 0)
		rc = EFAULT;

_exit:
	return (rc);
}

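/*
 * T4_IOCTL_GET_EDC: read a slice of EDC0, EDC1 or MC memory through memory
 * window 0, one aperture at a time, and copy it out to the caller.
 */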
static int
read_edc(struct adapter *sc, void *data, int flags)
{
	struct t4_edc t4edc;
	int rc = 0;
	u32 count, pos = 0;
	u32 memoffset;
	__be32 *edc = NULL;
	__be32 *p;

	if (ddi_copyin(data, &t4edc, sizeof (t4edc), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	if (t4edc.mem > 2) {
		rc = EINVAL;
		goto _exit;
	}

	/*
	 * Each window read transfers a full MEMWIN0_APERTURE worth of data,
	 * so the requested length must be a non-zero multiple of it.
	 */
	if (t4edc.len == 0 || (t4edc.len % MEMWIN0_APERTURE) != 0) {
		rc = EINVAL;
		goto _exit;
	}

	edc = kmem_zalloc(t4edc.len, KM_NOSLEEP);
	if (edc == NULL) {
		rc = ENOMEM;
		goto _exit;
	}
	/*
	 * Byte offset of the memory region being accessed:
	 * MEM_EDC0 = 0, MEM_EDC1 = 1, MEM_MC = 2.
	 */
	memoffset = (t4edc.mem * (5 * 1024 * 1024));
	count = t4edc.len;
	pos = t4edc.pos;
	p = edc;

	while (count) {
		u32 len;

		rc = t4_mem_win_read(sc, (pos + memoffset), p);
		if (rc != 0) {
			kmem_free(edc, t4edc.len);
			goto _exit;
		}

		len = MEMWIN0_APERTURE;
		pos += len;
		p += len / sizeof (__be32);
		count -= len;
	}

	if (ddi_copyout(edc, t4edc.data, t4edc.len, flags) < 0)
		rc = EFAULT;

	kmem_free(edc, t4edc.len);
_exit:
	return (rc);
}

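/*
 * T4_IOCTL_GET_CIM_IBQ: dump the contents of a CIM inbound queue (queue 3
 * here) and copy them out to the caller.
 */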
static int
read_cim_ibq(struct adapter *sc, void *data, int flags)
{
	struct t4_ibq t4ibq;
	int rc = 0;
	__be64 *buf;

	if (ddi_copyin(data, &t4ibq, sizeof (t4ibq), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	/* the queue dump is CIM_IBQ_SIZE entries of four 32-bit words each */
	if (t4ibq.len < CIM_IBQ_SIZE * 4 * sizeof (u32)) {
		rc = EINVAL;
		goto _exit;
	}

	buf = kmem_zalloc(t4ibq.len, KM_NOSLEEP);
	if (buf == NULL) {
		rc = ENOMEM;
		goto _exit;
	}

	rc = t4_read_cim_ibq(sc, 3, (u32 *)buf, CIM_IBQ_SIZE * 4);
	if (rc < 0) {
		kmem_free(buf, t4ibq.len);
		return (-rc);
	}
	rc = 0;

	if (ddi_copyout(buf, t4ibq.data, t4ibq.len, flags) < 0)
		rc = EFAULT;

	kmem_free(buf, t4ibq.len);

_exit:
	return (rc);
}

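/*
 * T4_IOCTL_GET_CIM_LA: read the CIM logic analyzer capture into a
 * caller-supplied buffer.
 */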
static int
read_cim_la(struct adapter *sc, void *data, int flags)
{
	struct t4_cim_la t4cimla;
	int rc = 0;
	unsigned int cfg;
	__be64 *buf;

	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc != 0)
		return (rc);

	if (ddi_copyin(data, &t4cimla, sizeof (t4cimla), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	buf = kmem_zalloc(t4cimla.len, KM_NOSLEEP);
	if (buf == NULL) {
		rc = ENOMEM;
		goto _exit;
	}

	rc = -t4_cim_read_la(sc, (u32 *)buf, NULL);
	if (rc != 0) {
		kmem_free(buf, t4cimla.len);
		return (rc);
	}

	if (ddi_copyout(buf, t4cimla.data, t4cimla.len, flags) < 0)
		rc = EFAULT;

	kmem_free(buf, t4cimla.len);

_exit:
	return (rc);
}

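/*
 * T4_IOCTL_GET_MBOX: read the current contents of PF4's mailbox data
 * registers and copy them out to the caller.
 */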
static int
read_mbox(struct adapter *sc, void *data, int flags)
{
	struct t4_mbox t4mbox;
	int rc = 0, i;
	__be64 *p, *buf;

	u32 data_reg = PF_REG(4, A_CIM_PF_MAILBOX_DATA);

	if (ddi_copyin(data, &t4mbox, sizeof (t4mbox), flags) < 0) {
		rc = EFAULT;
		goto _exit;
	}

	/* the mailbox is read 64 bits at a time; reject unaligned lengths */
	if (t4mbox.len == 0 || (t4mbox.len % sizeof (__be64)) != 0) {
		rc = EINVAL;
		goto _exit;
	}

	buf = p = kmem_zalloc(t4mbox.len, KM_NOSLEEP);
	if (buf == NULL) {
		rc = ENOMEM;
		goto _exit;
	}

	for (i = 0; i < t4mbox.len; i += 8, p++)
		*p = t4_read_reg64(sc, data_reg + i);

	if (ddi_copyout(buf, t4mbox.data, t4mbox.len, flags) < 0)
		rc = EFAULT;

	kmem_free(buf, t4mbox.len);

_exit:
	return (rc);
}