xref: /illumos-gate/usr/src/uts/common/io/cxgbe/t4nex/cudbg_lib.c (revision 7e6ad4690aa1d3cbf260ef54d5e9632ae40a782e)
1*7e6ad469SVishal Kulkarni /*
2*7e6ad469SVishal Kulkarni  * This file and its contents are supplied under the terms of the
3*7e6ad469SVishal Kulkarni  * Common Development and Distribution License ("CDDL"), version 1.0.
4*7e6ad469SVishal Kulkarni  * You may only use this file in accordance with the terms of version
5*7e6ad469SVishal Kulkarni  * 1.0 of the CDDL.
6*7e6ad469SVishal Kulkarni  *
7*7e6ad469SVishal Kulkarni  * A full copy of the text of the CDDL should have accompanied this
8*7e6ad469SVishal Kulkarni  * source. A copy of the CDDL is also available via the Internet at
9*7e6ad469SVishal Kulkarni  * http://www.illumos.org/license/CDDL.
10*7e6ad469SVishal Kulkarni  */
11*7e6ad469SVishal Kulkarni 
12*7e6ad469SVishal Kulkarni /*-
13*7e6ad469SVishal Kulkarni  * Copyright (c) 2019 Chelsio Communications, Inc.
14*7e6ad469SVishal Kulkarni  * All rights reserved.
15*7e6ad469SVishal Kulkarni  *
16*7e6ad469SVishal Kulkarni  * Redistribution and use in source and binary forms, with or without
17*7e6ad469SVishal Kulkarni  * modification, are permitted provided that the following conditions
18*7e6ad469SVishal Kulkarni  * are met:
19*7e6ad469SVishal Kulkarni  * 1. Redistributions of source code must retain the above copyright
20*7e6ad469SVishal Kulkarni  *    notice, this list of conditions and the following disclaimer.
21*7e6ad469SVishal Kulkarni  * 2. Redistributions in binary form must reproduce the above copyright
22*7e6ad469SVishal Kulkarni  *    notice, this list of conditions and the following disclaimer in the
23*7e6ad469SVishal Kulkarni  *    documentation and/or other materials provided with the distribution.
24*7e6ad469SVishal Kulkarni  *
25*7e6ad469SVishal Kulkarni  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26*7e6ad469SVishal Kulkarni  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27*7e6ad469SVishal Kulkarni  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28*7e6ad469SVishal Kulkarni  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29*7e6ad469SVishal Kulkarni  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30*7e6ad469SVishal Kulkarni  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31*7e6ad469SVishal Kulkarni  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32*7e6ad469SVishal Kulkarni  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33*7e6ad469SVishal Kulkarni  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34*7e6ad469SVishal Kulkarni  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35*7e6ad469SVishal Kulkarni  * SUCH DAMAGE.
36*7e6ad469SVishal Kulkarni  */
37*7e6ad469SVishal Kulkarni 
38*7e6ad469SVishal Kulkarni #include <sys/types.h>
39*7e6ad469SVishal Kulkarni #include <sys/param.h>
40*7e6ad469SVishal Kulkarni 
41*7e6ad469SVishal Kulkarni #include "common/common.h"
42*7e6ad469SVishal Kulkarni #include "common/t4_regs.h"
43*7e6ad469SVishal Kulkarni #include "common/t4_chip_type.h"
44*7e6ad469SVishal Kulkarni #include "cudbg.h"
45*7e6ad469SVishal Kulkarni #include "cudbg_lib_common.h"
46*7e6ad469SVishal Kulkarni #include "cudbg_lib.h"
47*7e6ad469SVishal Kulkarni #include "cudbg_entity.h"
48*7e6ad469SVishal Kulkarni 
49*7e6ad469SVishal Kulkarni #define BUFFER_WARN_LIMIT 10000000
50*7e6ad469SVishal Kulkarni 
51*7e6ad469SVishal Kulkarni struct large_entity large_entity_list[] = {
52*7e6ad469SVishal Kulkarni 	{CUDBG_EDC0, 0, 0},
53*7e6ad469SVishal Kulkarni 	{CUDBG_EDC1, 0, 0},
54*7e6ad469SVishal Kulkarni 	{CUDBG_MC0, 0, 0},
55*7e6ad469SVishal Kulkarni 	{CUDBG_MC1, 0, 0}
56*7e6ad469SVishal Kulkarni };
57*7e6ad469SVishal Kulkarni 
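/* Return non-zero if firmware is attached (FW_OK set), i.e. mailbox
 * commands can be issued to it.
 */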
58*7e6ad469SVishal Kulkarni static int
59*7e6ad469SVishal Kulkarni is_fw_attached(struct cudbg_init *pdbg_init)
60*7e6ad469SVishal Kulkarni {
62*7e6ad469SVishal Kulkarni 	return (pdbg_init->adap->flags & FW_OK);
63*7e6ad469SVishal Kulkarni }
64*7e6ad469SVishal Kulkarni 
65*7e6ad469SVishal Kulkarni /* This function adds padding bytes to debug_buffer to make it
66*7e6ad469SVishal Kulkarni  * 4-byte aligned. */
67*7e6ad469SVishal Kulkarni static void
68*7e6ad469SVishal Kulkarni align_debug_buffer(struct cudbg_buffer *dbg_buff,
69*7e6ad469SVishal Kulkarni 		   struct cudbg_entity_hdr *entity_hdr)
70*7e6ad469SVishal Kulkarni {
71*7e6ad469SVishal Kulkarni 	u8 zero_buf[4] = {0};
72*7e6ad469SVishal Kulkarni 	u8 padding, remain;
73*7e6ad469SVishal Kulkarni 
74*7e6ad469SVishal Kulkarni 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
75*7e6ad469SVishal Kulkarni 	padding = 4 - remain;
76*7e6ad469SVishal Kulkarni 	if (remain) {
77*7e6ad469SVishal Kulkarni 		memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
78*7e6ad469SVishal Kulkarni 		       padding);
79*7e6ad469SVishal Kulkarni 		dbg_buff->offset += padding;
80*7e6ad469SVishal Kulkarni 		entity_hdr->num_pad = padding;
81*7e6ad469SVishal Kulkarni 	}
82*7e6ad469SVishal Kulkarni 
83*7e6ad469SVishal Kulkarni 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
84*7e6ad469SVishal Kulkarni }
85*7e6ad469SVishal Kulkarni 
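/* Element-swap callbacks for the local qsort() below: u32_swap() for
 * 4-byte elements, generic_swap() for arbitrary element sizes.
 */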
86*7e6ad469SVishal Kulkarni static void
87*7e6ad469SVishal Kulkarni u32_swap(void *a, void *b, int size)
88*7e6ad469SVishal Kulkarni {
89*7e6ad469SVishal Kulkarni 	u32 t = *(u32 *)a;
90*7e6ad469SVishal Kulkarni 
91*7e6ad469SVishal Kulkarni 	*(u32 *)a = *(u32 *)b;
92*7e6ad469SVishal Kulkarni 	*(u32 *)b = t;
93*7e6ad469SVishal Kulkarni }
94*7e6ad469SVishal Kulkarni 
95*7e6ad469SVishal Kulkarni static void
96*7e6ad469SVishal Kulkarni generic_swap(void *a1, void *b1, int size)
97*7e6ad469SVishal Kulkarni {
98*7e6ad469SVishal Kulkarni 	u8 t;
99*7e6ad469SVishal Kulkarni 	u8 *a = (u8 *)a1;
100*7e6ad469SVishal Kulkarni 	u8 *b = (u8 *)b1;
101*7e6ad469SVishal Kulkarni 
102*7e6ad469SVishal Kulkarni 	do {
103*7e6ad469SVishal Kulkarni 		t = *a;
104*7e6ad469SVishal Kulkarni 		*(a++) = *b;
105*7e6ad469SVishal Kulkarni 		*(b++) = t;
106*7e6ad469SVishal Kulkarni 	} while (--size > 0);
107*7e6ad469SVishal Kulkarni }
108*7e6ad469SVishal Kulkarni 
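/* In-place heapsort over 'num' elements of 'size' bytes each; despite
 * the name this is not the libc quicksort.
 */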
109*7e6ad469SVishal Kulkarni static void
110*7e6ad469SVishal Kulkarni qsort(void *base_val, int num, int size,
111*7e6ad469SVishal Kulkarni       int (*cmp_func)(const void *, const void *),
112*7e6ad469SVishal Kulkarni       void (*swap_func)(void *, void *, int size))
113*7e6ad469SVishal Kulkarni {
114*7e6ad469SVishal Kulkarni 	/* pre-scale counters for performance */
115*7e6ad469SVishal Kulkarni 	int i = (num / 2 - 1) * size;
116*7e6ad469SVishal Kulkarni 	int n = num * size;
117*7e6ad469SVishal Kulkarni 	int c, r;
118*7e6ad469SVishal Kulkarni 	u8 *base = (u8 *)base_val;
119*7e6ad469SVishal Kulkarni 
120*7e6ad469SVishal Kulkarni 	if (!swap_func)
121*7e6ad469SVishal Kulkarni 		swap_func = (size == 4 ? u32_swap : generic_swap);
122*7e6ad469SVishal Kulkarni 
123*7e6ad469SVishal Kulkarni 	/* heapify */
124*7e6ad469SVishal Kulkarni 	for (; i >= 0; i -= size) {
125*7e6ad469SVishal Kulkarni 		for (r = i; r * 2 + size < n; r = c) {
126*7e6ad469SVishal Kulkarni 			c = r * 2 + size;
127*7e6ad469SVishal Kulkarni 			if (c < n - size &&
128*7e6ad469SVishal Kulkarni 					cmp_func(base + c, base + c + size) < 0)
129*7e6ad469SVishal Kulkarni 				c += size;
130*7e6ad469SVishal Kulkarni 			if (cmp_func(base + r, base + c) >= 0)
131*7e6ad469SVishal Kulkarni 				break;
132*7e6ad469SVishal Kulkarni 			swap_func(base + r, base + c, size);
133*7e6ad469SVishal Kulkarni 		}
134*7e6ad469SVishal Kulkarni 	}
135*7e6ad469SVishal Kulkarni 
136*7e6ad469SVishal Kulkarni 	/* sort */
137*7e6ad469SVishal Kulkarni 	for (i = n - size; i > 0; i -= size) {
138*7e6ad469SVishal Kulkarni 		swap_func(base, base + i, size);
139*7e6ad469SVishal Kulkarni 		for (r = 0; r * 2 + size < i; r = c) {
140*7e6ad469SVishal Kulkarni 			c = r * 2 + size;
141*7e6ad469SVishal Kulkarni 			if (c < i - size &&
142*7e6ad469SVishal Kulkarni 					cmp_func(base + c, base + c + size) < 0)
143*7e6ad469SVishal Kulkarni 				c += size;
144*7e6ad469SVishal Kulkarni 			if (cmp_func(base + r, base + c) >= 0)
145*7e6ad469SVishal Kulkarni 				break;
146*7e6ad469SVishal Kulkarni 			swap_func(base + r, base + c, size);
147*7e6ad469SVishal Kulkarni 		}
148*7e6ad469SVishal Kulkarni 	}
149*7e6ad469SVishal Kulkarni }
150*7e6ad469SVishal Kulkarni 
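/* Read an SGE context: via the firmware mailbox when firmware is
 * attached, falling back to the register backdoor read otherwise or
 * when the mailbox read fails.
 */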
151*7e6ad469SVishal Kulkarni static void
152*7e6ad469SVishal Kulkarni read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
153*7e6ad469SVishal Kulkarni 	      enum ctxt_type ctype, u32 *data)
154*7e6ad469SVishal Kulkarni {
155*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
156*7e6ad469SVishal Kulkarni 	int rc = -1;
157*7e6ad469SVishal Kulkarni 
158*7e6ad469SVishal Kulkarni 	if (is_fw_attached(pdbg_init)) {
159*7e6ad469SVishal Kulkarni 		rc = begin_synchronized_op(padap->port[0], 1, 1);
160*7e6ad469SVishal Kulkarni 		if (rc != 0)
161*7e6ad469SVishal Kulkarni 			goto out;
162*7e6ad469SVishal Kulkarni 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
163*7e6ad469SVishal Kulkarni 				    data);
164*7e6ad469SVishal Kulkarni 		end_synchronized_op(padap->port[0], 1);
165*7e6ad469SVishal Kulkarni 	}
166*7e6ad469SVishal Kulkarni 
167*7e6ad469SVishal Kulkarni out:
168*7e6ad469SVishal Kulkarni 	if (rc)
169*7e6ad469SVishal Kulkarni 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
170*7e6ad469SVishal Kulkarni }
171*7e6ad469SVishal Kulkarni 
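/* Walk the chain of extended entity headers appended after the regular
 * debug data and return the first free header, updating the buffer
 * offset and the accumulated extended size.
 */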
172*7e6ad469SVishal Kulkarni static int
173*7e6ad469SVishal Kulkarni get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
174*7e6ad469SVishal Kulkarni 			struct cudbg_buffer *dbg_buff,
175*7e6ad469SVishal Kulkarni 			struct cudbg_entity_hdr **entity_hdr)
176*7e6ad469SVishal Kulkarni {
177*7e6ad469SVishal Kulkarni 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
178*7e6ad469SVishal Kulkarni 	int rc = 0;
179*7e6ad469SVishal Kulkarni 	u32 ext_offset = cudbg_hdr->data_len;
180*7e6ad469SVishal Kulkarni 	*ext_size = 0;
181*7e6ad469SVishal Kulkarni 
182*7e6ad469SVishal Kulkarni 	if (dbg_buff->size - dbg_buff->offset <=
183*7e6ad469SVishal Kulkarni 		 sizeof(struct cudbg_entity_hdr)) {
184*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_BUFFER_SHORT;
185*7e6ad469SVishal Kulkarni 		goto err;
186*7e6ad469SVishal Kulkarni 	}
187*7e6ad469SVishal Kulkarni 
188*7e6ad469SVishal Kulkarni 	*entity_hdr = (struct cudbg_entity_hdr *)
189*7e6ad469SVishal Kulkarni 		       ((char *)outbuf + cudbg_hdr->data_len);
190*7e6ad469SVishal Kulkarni 
191*7e6ad469SVishal Kulkarni 	/* Find the last extended entity header */
192*7e6ad469SVishal Kulkarni 	while ((*entity_hdr)->size) {
193*7e6ad469SVishal Kulkarni 
194*7e6ad469SVishal Kulkarni 		ext_offset += sizeof(struct cudbg_entity_hdr) +
195*7e6ad469SVishal Kulkarni 				     (*entity_hdr)->size;
196*7e6ad469SVishal Kulkarni 
197*7e6ad469SVishal Kulkarni 		*ext_size += (*entity_hdr)->size +
198*7e6ad469SVishal Kulkarni 			      sizeof(struct cudbg_entity_hdr);
199*7e6ad469SVishal Kulkarni 
200*7e6ad469SVishal Kulkarni 		if (dbg_buff->size - dbg_buff->offset + *ext_size <=
201*7e6ad469SVishal Kulkarni 			sizeof(struct cudbg_entity_hdr)) {
202*7e6ad469SVishal Kulkarni 			rc = CUDBG_STATUS_BUFFER_SHORT;
203*7e6ad469SVishal Kulkarni 			goto err;
204*7e6ad469SVishal Kulkarni 		}
205*7e6ad469SVishal Kulkarni 
206*7e6ad469SVishal Kulkarni 		if (ext_offset != (*entity_hdr)->next_ext_offset) {
207*7e6ad469SVishal Kulkarni 			ext_offset -= sizeof(struct cudbg_entity_hdr) +
208*7e6ad469SVishal Kulkarni 				     (*entity_hdr)->size;
209*7e6ad469SVishal Kulkarni 			break;
210*7e6ad469SVishal Kulkarni 		}
211*7e6ad469SVishal Kulkarni 
212*7e6ad469SVishal Kulkarni 		(*entity_hdr)->next_ext_offset = *ext_size;
213*7e6ad469SVishal Kulkarni 
214*7e6ad469SVishal Kulkarni 		*entity_hdr = (struct cudbg_entity_hdr *)
215*7e6ad469SVishal Kulkarni 					   ((char *)outbuf +
216*7e6ad469SVishal Kulkarni 					   ext_offset);
217*7e6ad469SVishal Kulkarni 	}
218*7e6ad469SVishal Kulkarni 
219*7e6ad469SVishal Kulkarni 	/* update the data offset */
220*7e6ad469SVishal Kulkarni 	dbg_buff->offset = ext_offset;
221*7e6ad469SVishal Kulkarni err:
222*7e6ad469SVishal Kulkarni 	return rc;
223*7e6ad469SVishal Kulkarni }
224*7e6ad469SVishal Kulkarni 
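/* Write a collected entity into the cudbg region of the adapter flash.
 * Entities that do not fit in the remaining flash space are recorded as
 * skipped.
 */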
225*7e6ad469SVishal Kulkarni static int
226*7e6ad469SVishal Kulkarni wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
227*7e6ad469SVishal Kulkarni 		   u32 cur_entity_data_offset,
228*7e6ad469SVishal Kulkarni 		   u32 cur_entity_size,
229*7e6ad469SVishal Kulkarni 		   int entity_nu, u32 ext_size)
230*7e6ad469SVishal Kulkarni {
231*7e6ad469SVishal Kulkarni 	struct cudbg_private *priv = handle;
232*7e6ad469SVishal Kulkarni 	struct cudbg_init *cudbg_init = &priv->dbg_init;
233*7e6ad469SVishal Kulkarni 	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
234*7e6ad469SVishal Kulkarni 	struct adapter *adap = cudbg_init->adap;
235*7e6ad469SVishal Kulkarni 	u64 timestamp;
236*7e6ad469SVishal Kulkarni 	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
237*7e6ad469SVishal Kulkarni 	u32 remain_flash_size;
238*7e6ad469SVishal Kulkarni 	u32 flash_data_offset;
239*7e6ad469SVishal Kulkarni 	u32 data_hdr_size;
240*7e6ad469SVishal Kulkarni 	int rc = -1;
241*7e6ad469SVishal Kulkarni 
242*7e6ad469SVishal Kulkarni 	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
243*7e6ad469SVishal Kulkarni 			sizeof(struct cudbg_hdr);
244*7e6ad469SVishal Kulkarni 
245*7e6ad469SVishal Kulkarni 	flash_data_offset = (FLASH_CUDBG_NSECS *
246*7e6ad469SVishal Kulkarni 			     (sizeof(struct cudbg_flash_hdr) +
247*7e6ad469SVishal Kulkarni 			      data_hdr_size)) +
248*7e6ad469SVishal Kulkarni 			    (cur_entity_data_offset - data_hdr_size);
249*7e6ad469SVishal Kulkarni 
250*7e6ad469SVishal Kulkarni 	if (flash_data_offset > CUDBG_FLASH_SIZE) {
251*7e6ad469SVishal Kulkarni 		update_skip_size(sec_info, cur_entity_size);
252*7e6ad469SVishal Kulkarni 		if (cudbg_init->verbose)
253*7e6ad469SVishal Kulkarni 			cudbg_init->print(adap->dip, CE_NOTE,
254*7e6ad469SVishal Kulkarni 					  "Large entity skipping...\n");
255*7e6ad469SVishal Kulkarni 		return rc;
256*7e6ad469SVishal Kulkarni 	}
257*7e6ad469SVishal Kulkarni 
258*7e6ad469SVishal Kulkarni 	remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
259*7e6ad469SVishal Kulkarni 
260*7e6ad469SVishal Kulkarni 	if (cur_entity_size > remain_flash_size) {
261*7e6ad469SVishal Kulkarni 		update_skip_size(sec_info, cur_entity_size);
262*7e6ad469SVishal Kulkarni 		if (cudbg_init->verbose)
263*7e6ad469SVishal Kulkarni 			cudbg_init->print(adap->dip, CE_NOTE,
264*7e6ad469SVishal Kulkarni 					  "Large entity skipping...\n");
265*7e6ad469SVishal Kulkarni 	} else {
266*7e6ad469SVishal Kulkarni 		timestamp = 0;
267*7e6ad469SVishal Kulkarni 
268*7e6ad469SVishal Kulkarni 		cur_entity_hdr_offset +=
269*7e6ad469SVishal Kulkarni 			(sizeof(struct cudbg_entity_hdr) *
270*7e6ad469SVishal Kulkarni 			(entity_nu - 1));
271*7e6ad469SVishal Kulkarni 
272*7e6ad469SVishal Kulkarni 		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
273*7e6ad469SVishal Kulkarni 				       cur_entity_data_offset,
274*7e6ad469SVishal Kulkarni 				       cur_entity_hdr_offset,
275*7e6ad469SVishal Kulkarni 				       cur_entity_size,
276*7e6ad469SVishal Kulkarni 				       ext_size);
277*7e6ad469SVishal Kulkarni 		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
278*7e6ad469SVishal Kulkarni 			cudbg_init->print(adap->dip, CE_NOTE,
279*7e6ad469SVishal Kulkarni 					  "\n\tFLASH is full... "
280*7e6ad469SVishal Kulkarni 					  "cannot write any more data to flash\n\n");
281*7e6ad469SVishal Kulkarni 	}
282*7e6ad469SVishal Kulkarni 
283*7e6ad469SVishal Kulkarni 	return rc;
284*7e6ad469SVishal Kulkarni }
285*7e6ad469SVishal Kulkarni 
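/* Main collection entry point: walk the requested entity bitmap, collect
 * each entity into outbuf (and optionally flash), then retry any large
 * entities that were skipped on the first pass.
 */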
286*7e6ad469SVishal Kulkarni int
287*7e6ad469SVishal Kulkarni cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
288*7e6ad469SVishal Kulkarni {
289*7e6ad469SVishal Kulkarni 	struct cudbg_entity_hdr *entity_hdr = NULL;
290*7e6ad469SVishal Kulkarni 	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
291*7e6ad469SVishal Kulkarni 	struct cudbg_hdr *cudbg_hdr;
292*7e6ad469SVishal Kulkarni 	struct cudbg_buffer dbg_buff;
293*7e6ad469SVishal Kulkarni 	struct cudbg_error cudbg_err = {0};
294*7e6ad469SVishal Kulkarni 	int large_entity_code;
295*7e6ad469SVishal Kulkarni 
296*7e6ad469SVishal Kulkarni 	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
297*7e6ad469SVishal Kulkarni 	struct cudbg_init *cudbg_init =
298*7e6ad469SVishal Kulkarni 		&(((struct cudbg_private *)handle)->dbg_init);
299*7e6ad469SVishal Kulkarni 	struct adapter *padap = cudbg_init->adap;
300*7e6ad469SVishal Kulkarni 	u32 total_size, remaining_buf_size;
301*7e6ad469SVishal Kulkarni 	u32 ext_size = 0;
302*7e6ad469SVishal Kulkarni 	int index, bit, i, rc = -1;
303*7e6ad469SVishal Kulkarni 	int all;
304*7e6ad469SVishal Kulkarni 	bool flag_ext = 0;
305*7e6ad469SVishal Kulkarni 
306*7e6ad469SVishal Kulkarni 	reset_skip_entity();
307*7e6ad469SVishal Kulkarni 
308*7e6ad469SVishal Kulkarni 	dbg_buff.data = outbuf;
309*7e6ad469SVishal Kulkarni 	dbg_buff.size = *outbuf_size;
310*7e6ad469SVishal Kulkarni 	dbg_buff.offset = 0;
311*7e6ad469SVishal Kulkarni 
312*7e6ad469SVishal Kulkarni 	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
313*7e6ad469SVishal Kulkarni 	cudbg_hdr->signature = CUDBG_SIGNATURE;
314*7e6ad469SVishal Kulkarni 	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
315*7e6ad469SVishal Kulkarni 	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
316*7e6ad469SVishal Kulkarni 	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
317*7e6ad469SVishal Kulkarni 	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
318*7e6ad469SVishal Kulkarni 	cudbg_hdr->chip_ver = padap->params.chip;
319*7e6ad469SVishal Kulkarni 
320*7e6ad469SVishal Kulkarni 	if (cudbg_hdr->data_len)
321*7e6ad469SVishal Kulkarni 		flag_ext = 1;
322*7e6ad469SVishal Kulkarni 
323*7e6ad469SVishal Kulkarni 	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
324*7e6ad469SVishal Kulkarni 	    dbg_buff.size) {
325*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_SMALL_BUFF;
326*7e6ad469SVishal Kulkarni 		total_size = cudbg_hdr->hdr_len;
327*7e6ad469SVishal Kulkarni 		goto err;
328*7e6ad469SVishal Kulkarni 	}
329*7e6ad469SVishal Kulkarni 
330*7e6ad469SVishal Kulkarni 	/* If the ext flag is set, move the offset to the end of the buffer
331*7e6ad469SVishal Kulkarni 	 * so that we can add ext entities
332*7e6ad469SVishal Kulkarni 	 */
333*7e6ad469SVishal Kulkarni 	if (flag_ext) {
334*7e6ad469SVishal Kulkarni 		ext_entity_hdr = (struct cudbg_entity_hdr *)
335*7e6ad469SVishal Kulkarni 			      ((char *)outbuf + cudbg_hdr->hdr_len +
336*7e6ad469SVishal Kulkarni 			      (sizeof(struct cudbg_entity_hdr) *
337*7e6ad469SVishal Kulkarni 			      (CUDBG_EXT_ENTITY - 1)));
338*7e6ad469SVishal Kulkarni 		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
339*7e6ad469SVishal Kulkarni 		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
340*7e6ad469SVishal Kulkarni 		ext_entity_hdr->size = 0;
341*7e6ad469SVishal Kulkarni 		dbg_buff.offset = cudbg_hdr->data_len;
342*7e6ad469SVishal Kulkarni 	} else {
343*7e6ad469SVishal Kulkarni 		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes */
344*7e6ad469SVishal Kulkarni 		dbg_buff.offset += CUDBG_MAX_ENTITY *
345*7e6ad469SVishal Kulkarni 					sizeof(struct cudbg_entity_hdr);
346*7e6ad469SVishal Kulkarni 	}
347*7e6ad469SVishal Kulkarni 
348*7e6ad469SVishal Kulkarni 	total_size = dbg_buff.offset;
349*7e6ad469SVishal Kulkarni 	all = dbg_bitmap[0] & (1 << CUDBG_ALL);
350*7e6ad469SVishal Kulkarni 
351*7e6ad469SVishal Kulkarni 	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
352*7e6ad469SVishal Kulkarni 		index = i / 8;
353*7e6ad469SVishal Kulkarni 		bit = i % 8;
354*7e6ad469SVishal Kulkarni 
355*7e6ad469SVishal Kulkarni 		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
356*7e6ad469SVishal Kulkarni 			continue;
357*7e6ad469SVishal Kulkarni 
358*7e6ad469SVishal Kulkarni 		if (all || (dbg_bitmap[index] & (1 << bit))) {
359*7e6ad469SVishal Kulkarni 
360*7e6ad469SVishal Kulkarni 			if (!flag_ext) {
361*7e6ad469SVishal Kulkarni 				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
362*7e6ad469SVishal Kulkarni 						    &entity_hdr);
363*7e6ad469SVishal Kulkarni 				if (rc)
364*7e6ad469SVishal Kulkarni 					cudbg_hdr->hdr_flags = rc;
365*7e6ad469SVishal Kulkarni 			} else {
366*7e6ad469SVishal Kulkarni 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
367*7e6ad469SVishal Kulkarni 							     &dbg_buff,
368*7e6ad469SVishal Kulkarni 							     &entity_hdr);
369*7e6ad469SVishal Kulkarni 				if (rc)
370*7e6ad469SVishal Kulkarni 					goto err;
371*7e6ad469SVishal Kulkarni 
372*7e6ad469SVishal Kulkarni 				/* move the offset after the ext header */
373*7e6ad469SVishal Kulkarni 				dbg_buff.offset +=
374*7e6ad469SVishal Kulkarni 					sizeof(struct cudbg_entity_hdr);
375*7e6ad469SVishal Kulkarni 			}
376*7e6ad469SVishal Kulkarni 
377*7e6ad469SVishal Kulkarni 			entity_hdr->entity_type = i;
378*7e6ad469SVishal Kulkarni 			entity_hdr->start_offset = dbg_buff.offset;
379*7e6ad469SVishal Kulkarni 			/* process each entity by calling process_entity fp */
380*7e6ad469SVishal Kulkarni 			remaining_buf_size = dbg_buff.size - dbg_buff.offset;
381*7e6ad469SVishal Kulkarni 
382*7e6ad469SVishal Kulkarni 			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
383*7e6ad469SVishal Kulkarni 			    is_large_entity(i)) {
384*7e6ad469SVishal Kulkarni 				if (cudbg_init->verbose)
385*7e6ad469SVishal Kulkarni 					cudbg_init->print(padap->dip, CE_NOTE,
386*7e6ad469SVishal Kulkarni 							  "Skipping %s\n",
387*7e6ad469SVishal Kulkarni 					    entity_list[i].name);
388*7e6ad469SVishal Kulkarni 				skip_entity(i);
389*7e6ad469SVishal Kulkarni 				continue;
390*7e6ad469SVishal Kulkarni 			} else {
391*7e6ad469SVishal Kulkarni 
392*7e6ad469SVishal Kulkarni 				/* If fw_attach is 0, then skip entities that
393*7e6ad469SVishal Kulkarni 				 * communicate with the firmware
394*7e6ad469SVishal Kulkarni 				 */
395*7e6ad469SVishal Kulkarni 
396*7e6ad469SVishal Kulkarni 				if (!is_fw_attached(cudbg_init) &&
397*7e6ad469SVishal Kulkarni 				    (entity_list[i].flag &
398*7e6ad469SVishal Kulkarni 				    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
399*7e6ad469SVishal Kulkarni 					if (cudbg_init->verbose)
400*7e6ad469SVishal Kulkarni 						cudbg_init->print(padap->dip, CE_NOTE,
401*7e6ad469SVishal Kulkarni 							  "Skipping %s entity, "\
402*7e6ad469SVishal Kulkarni 							  "because fw_attach "\
403*7e6ad469SVishal Kulkarni 							  "is 0\n",
404*7e6ad469SVishal Kulkarni 							  entity_list[i].name);
405*7e6ad469SVishal Kulkarni 					continue;
406*7e6ad469SVishal Kulkarni 				}
407*7e6ad469SVishal Kulkarni 
408*7e6ad469SVishal Kulkarni 				if (cudbg_init->verbose)
409*7e6ad469SVishal Kulkarni 					cudbg_init->print(padap->dip, CE_NOTE,
410*7e6ad469SVishal Kulkarni 							  "collecting debug entity: "\
411*7e6ad469SVishal Kulkarni 						  "%s\n", entity_list[i].name);
412*7e6ad469SVishal Kulkarni 				memset(&cudbg_err, 0,
413*7e6ad469SVishal Kulkarni 				       sizeof(struct cudbg_error));
414*7e6ad469SVishal Kulkarni 				rc = process_entity[i-1](cudbg_init, &dbg_buff,
415*7e6ad469SVishal Kulkarni 							 &cudbg_err);
416*7e6ad469SVishal Kulkarni 			}
417*7e6ad469SVishal Kulkarni 
418*7e6ad469SVishal Kulkarni 			if (rc) {
419*7e6ad469SVishal Kulkarni 				entity_hdr->size = 0;
420*7e6ad469SVishal Kulkarni 				dbg_buff.offset = entity_hdr->start_offset;
421*7e6ad469SVishal Kulkarni 			} else
422*7e6ad469SVishal Kulkarni 				align_debug_buffer(&dbg_buff, entity_hdr);
423*7e6ad469SVishal Kulkarni 
424*7e6ad469SVishal Kulkarni 			if (cudbg_err.sys_err)
425*7e6ad469SVishal Kulkarni 				rc = CUDBG_SYSTEM_ERROR;
426*7e6ad469SVishal Kulkarni 
427*7e6ad469SVishal Kulkarni 			entity_hdr->hdr_flags = rc;
428*7e6ad469SVishal Kulkarni 			entity_hdr->sys_err = cudbg_err.sys_err;
429*7e6ad469SVishal Kulkarni 			entity_hdr->sys_warn = cudbg_err.sys_warn;
430*7e6ad469SVishal Kulkarni 
431*7e6ad469SVishal Kulkarni 			/* We don't want to include ext entity size in global
432*7e6ad469SVishal Kulkarni 			 * header
433*7e6ad469SVishal Kulkarni 			 */
434*7e6ad469SVishal Kulkarni 			if (!flag_ext)
435*7e6ad469SVishal Kulkarni 				total_size += entity_hdr->size;
436*7e6ad469SVishal Kulkarni 
437*7e6ad469SVishal Kulkarni 			cudbg_hdr->data_len = total_size;
438*7e6ad469SVishal Kulkarni 			*outbuf_size = total_size;
439*7e6ad469SVishal Kulkarni 
440*7e6ad469SVishal Kulkarni 			/* consider the size of the ext entity header and data
441*7e6ad469SVishal Kulkarni 			 * also
442*7e6ad469SVishal Kulkarni 			 */
443*7e6ad469SVishal Kulkarni 			if (flag_ext) {
444*7e6ad469SVishal Kulkarni 				ext_size += (sizeof(struct cudbg_entity_hdr) +
445*7e6ad469SVishal Kulkarni 					     entity_hdr->size);
446*7e6ad469SVishal Kulkarni 				entity_hdr->start_offset -= cudbg_hdr->data_len;
447*7e6ad469SVishal Kulkarni 				ext_entity_hdr->size = ext_size;
448*7e6ad469SVishal Kulkarni 				entity_hdr->next_ext_offset = ext_size;
449*7e6ad469SVishal Kulkarni 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
450*7e6ad469SVishal Kulkarni 			}
451*7e6ad469SVishal Kulkarni 
452*7e6ad469SVishal Kulkarni 			if (cudbg_init->use_flash) {
453*7e6ad469SVishal Kulkarni 				if (flag_ext) {
454*7e6ad469SVishal Kulkarni 					wr_entity_to_flash(handle,
455*7e6ad469SVishal Kulkarni 							   &dbg_buff,
456*7e6ad469SVishal Kulkarni 							   ext_entity_hdr->
457*7e6ad469SVishal Kulkarni 							   start_offset,
458*7e6ad469SVishal Kulkarni 							   entity_hdr->
459*7e6ad469SVishal Kulkarni 							   size,
460*7e6ad469SVishal Kulkarni 							   CUDBG_EXT_ENTITY,
461*7e6ad469SVishal Kulkarni 							   ext_size);
462*7e6ad469SVishal Kulkarni 				} else
464*7e6ad469SVishal Kulkarni 					wr_entity_to_flash(handle,
465*7e6ad469SVishal Kulkarni 							   &dbg_buff,
466*7e6ad469SVishal Kulkarni 							   entity_hdr->
467*7e6ad469SVishal Kulkarni 							   start_offset,
468*7e6ad469SVishal Kulkarni 							   entity_hdr->size,
469*7e6ad469SVishal Kulkarni 							   i, ext_size);
470*7e6ad469SVishal Kulkarni 			}
471*7e6ad469SVishal Kulkarni 		}
472*7e6ad469SVishal Kulkarni 	}
473*7e6ad469SVishal Kulkarni 
474*7e6ad469SVishal Kulkarni 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
476*7e6ad469SVishal Kulkarni 		large_entity_code = large_entity_list[i].entity_code;
477*7e6ad469SVishal Kulkarni 		if (large_entity_list[i].skip_flag) {
478*7e6ad469SVishal Kulkarni 			if (!flag_ext) {
479*7e6ad469SVishal Kulkarni 				rc = get_entity_hdr(outbuf, large_entity_code,
480*7e6ad469SVishal Kulkarni 						    dbg_buff.size, &entity_hdr);
481*7e6ad469SVishal Kulkarni 				if (rc)
482*7e6ad469SVishal Kulkarni 					cudbg_hdr->hdr_flags = rc;
483*7e6ad469SVishal Kulkarni 			} else {
484*7e6ad469SVishal Kulkarni 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
485*7e6ad469SVishal Kulkarni 							     &dbg_buff,
486*7e6ad469SVishal Kulkarni 							     &entity_hdr);
487*7e6ad469SVishal Kulkarni 				if (rc)
488*7e6ad469SVishal Kulkarni 					goto err;
489*7e6ad469SVishal Kulkarni 
490*7e6ad469SVishal Kulkarni 				dbg_buff.offset +=
491*7e6ad469SVishal Kulkarni 					sizeof(struct cudbg_entity_hdr);
492*7e6ad469SVishal Kulkarni 			}
493*7e6ad469SVishal Kulkarni 
494*7e6ad469SVishal Kulkarni 			/* If fw_attach is 0, then skip entities that
495*7e6ad469SVishal Kulkarni 			 * communicate with the firmware
496*7e6ad469SVishal Kulkarni 			 */
497*7e6ad469SVishal Kulkarni 			if (!is_fw_attached(cudbg_init) &&
498*7e6ad469SVishal Kulkarni 			    (entity_list[large_entity_code].flag &
499*7e6ad469SVishal Kulkarni 			    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
500*7e6ad469SVishal Kulkarni 				if (cudbg_init->verbose)
501*7e6ad469SVishal Kulkarni 					cudbg_init->print(padap->dip, CE_NOTE,
502*7e6ad469SVishal Kulkarni 						  "Skipping %s entity, "\
503*7e6ad469SVishal Kulkarni 						  "because fw_attach "\
504*7e6ad469SVishal Kulkarni 						  "is 0\n",
505*7e6ad469SVishal Kulkarni 						  entity_list[large_entity_code]
506*7e6ad469SVishal Kulkarni 						  .name);
507*7e6ad469SVishal Kulkarni 				continue;
508*7e6ad469SVishal Kulkarni 			}
509*7e6ad469SVishal Kulkarni 
510*7e6ad469SVishal Kulkarni 			entity_hdr->entity_type = large_entity_code;
511*7e6ad469SVishal Kulkarni 			entity_hdr->start_offset = dbg_buff.offset;
512*7e6ad469SVishal Kulkarni 			if (cudbg_init->verbose)
513*7e6ad469SVishal Kulkarni 				cudbg_init->print(padap->dip, CE_NOTE,
514*7e6ad469SVishal Kulkarni 					  "Re-trying debug entity: %s\n",
515*7e6ad469SVishal Kulkarni 					  entity_list[large_entity_code].name);
516*7e6ad469SVishal Kulkarni 
517*7e6ad469SVishal Kulkarni 			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
518*7e6ad469SVishal Kulkarni 			rc = process_entity[large_entity_code - 1](cudbg_init,
519*7e6ad469SVishal Kulkarni 								   &dbg_buff,
520*7e6ad469SVishal Kulkarni 								   &cudbg_err);
521*7e6ad469SVishal Kulkarni 			if (rc) {
522*7e6ad469SVishal Kulkarni 				entity_hdr->size = 0;
523*7e6ad469SVishal Kulkarni 				dbg_buff.offset = entity_hdr->start_offset;
524*7e6ad469SVishal Kulkarni 			} else
525*7e6ad469SVishal Kulkarni 				align_debug_buffer(&dbg_buff, entity_hdr);
526*7e6ad469SVishal Kulkarni 
527*7e6ad469SVishal Kulkarni 			if (cudbg_err.sys_err)
528*7e6ad469SVishal Kulkarni 				rc = CUDBG_SYSTEM_ERROR;
529*7e6ad469SVishal Kulkarni 
530*7e6ad469SVishal Kulkarni 			entity_hdr->hdr_flags = rc;
531*7e6ad469SVishal Kulkarni 			entity_hdr->sys_err = cudbg_err.sys_err;
532*7e6ad469SVishal Kulkarni 			entity_hdr->sys_warn = cudbg_err.sys_warn;
533*7e6ad469SVishal Kulkarni 
534*7e6ad469SVishal Kulkarni 			/* We don't want to include ext entity size in global
535*7e6ad469SVishal Kulkarni 			 * header
536*7e6ad469SVishal Kulkarni 			 */
537*7e6ad469SVishal Kulkarni 			if (!flag_ext)
538*7e6ad469SVishal Kulkarni 				total_size += entity_hdr->size;
539*7e6ad469SVishal Kulkarni 
540*7e6ad469SVishal Kulkarni 			cudbg_hdr->data_len = total_size;
541*7e6ad469SVishal Kulkarni 			*outbuf_size = total_size;
542*7e6ad469SVishal Kulkarni 
543*7e6ad469SVishal Kulkarni 			/* consider the size of the ext entity header and
544*7e6ad469SVishal Kulkarni 			 * data also
545*7e6ad469SVishal Kulkarni 			 */
546*7e6ad469SVishal Kulkarni 			if (flag_ext) {
547*7e6ad469SVishal Kulkarni 				ext_size += (sizeof(struct cudbg_entity_hdr) +
548*7e6ad469SVishal Kulkarni 						   entity_hdr->size);
549*7e6ad469SVishal Kulkarni 				entity_hdr->start_offset -=
550*7e6ad469SVishal Kulkarni 							cudbg_hdr->data_len;
551*7e6ad469SVishal Kulkarni 				ext_entity_hdr->size = ext_size;
552*7e6ad469SVishal Kulkarni 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
553*7e6ad469SVishal Kulkarni 			}
554*7e6ad469SVishal Kulkarni 
555*7e6ad469SVishal Kulkarni 			if (cudbg_init->use_flash) {
556*7e6ad469SVishal Kulkarni 				if (flag_ext)
557*7e6ad469SVishal Kulkarni 					wr_entity_to_flash(handle,
558*7e6ad469SVishal Kulkarni 							   &dbg_buff,
559*7e6ad469SVishal Kulkarni 							   ext_entity_hdr->
560*7e6ad469SVishal Kulkarni 							   start_offset,
561*7e6ad469SVishal Kulkarni 							   entity_hdr->size,
562*7e6ad469SVishal Kulkarni 							   CUDBG_EXT_ENTITY,
563*7e6ad469SVishal Kulkarni 							   ext_size);
564*7e6ad469SVishal Kulkarni 				else
565*7e6ad469SVishal Kulkarni 					wr_entity_to_flash(handle,
566*7e6ad469SVishal Kulkarni 							   &dbg_buff,
567*7e6ad469SVishal Kulkarni 							   entity_hdr->
568*7e6ad469SVishal Kulkarni 							   start_offset,
569*7e6ad469SVishal Kulkarni 							   entity_hdr->
570*7e6ad469SVishal Kulkarni 							   size,
571*7e6ad469SVishal Kulkarni 							   large_entity_list[i].
572*7e6ad469SVishal Kulkarni 							   entity_code,
573*7e6ad469SVishal Kulkarni 							   ext_size);
574*7e6ad469SVishal Kulkarni 			}
575*7e6ad469SVishal Kulkarni 		}
576*7e6ad469SVishal Kulkarni 	}
577*7e6ad469SVishal Kulkarni 
578*7e6ad469SVishal Kulkarni 	cudbg_hdr->data_len = total_size;
579*7e6ad469SVishal Kulkarni 	*outbuf_size = total_size;
580*7e6ad469SVishal Kulkarni 
581*7e6ad469SVishal Kulkarni 	if (flag_ext)
582*7e6ad469SVishal Kulkarni 		*outbuf_size += ext_size;
583*7e6ad469SVishal Kulkarni 
584*7e6ad469SVishal Kulkarni 	return 0;
585*7e6ad469SVishal Kulkarni err:
586*7e6ad469SVishal Kulkarni 	return rc;
587*7e6ad469SVishal Kulkarni }
588*7e6ad469SVishal Kulkarni 
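/* Helpers for tracking deferred large entities: reset_skip_entity()
 * clears all skip flags, skip_entity() marks one, and is_large_entity()
 * tests membership in large_entity_list.
 */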
589*7e6ad469SVishal Kulkarni void
590*7e6ad469SVishal Kulkarni reset_skip_entity(void)
591*7e6ad469SVishal Kulkarni {
592*7e6ad469SVishal Kulkarni 	int i;
593*7e6ad469SVishal Kulkarni 
594*7e6ad469SVishal Kulkarni 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
595*7e6ad469SVishal Kulkarni 		large_entity_list[i].skip_flag = 0;
596*7e6ad469SVishal Kulkarni }
597*7e6ad469SVishal Kulkarni 
598*7e6ad469SVishal Kulkarni void
599*7e6ad469SVishal Kulkarni skip_entity(int entity_code)
600*7e6ad469SVishal Kulkarni {
601*7e6ad469SVishal Kulkarni 	int i;
602*7e6ad469SVishal Kulkarni 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
604*7e6ad469SVishal Kulkarni 		if (large_entity_list[i].entity_code == entity_code)
605*7e6ad469SVishal Kulkarni 			large_entity_list[i].skip_flag = 1;
606*7e6ad469SVishal Kulkarni 	}
607*7e6ad469SVishal Kulkarni }
608*7e6ad469SVishal Kulkarni 
609*7e6ad469SVishal Kulkarni int
610*7e6ad469SVishal Kulkarni is_large_entity(int entity_code)
611*7e6ad469SVishal Kulkarni {
612*7e6ad469SVishal Kulkarni 	int i;
613*7e6ad469SVishal Kulkarni 
614*7e6ad469SVishal Kulkarni 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
616*7e6ad469SVishal Kulkarni 		if (large_entity_list[i].entity_code == entity_code)
617*7e6ad469SVishal Kulkarni 			return 1;
618*7e6ad469SVishal Kulkarni 	}
619*7e6ad469SVishal Kulkarni 	return 0;
620*7e6ad469SVishal Kulkarni }
621*7e6ad469SVishal Kulkarni 
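/* Return a pointer to the header slot for entity 'i' (1-based), located
 * right after the global cudbg header in the output buffer.
 */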
622*7e6ad469SVishal Kulkarni int
623*7e6ad469SVishal Kulkarni get_entity_hdr(void *outbuf, int i, u32 size,
624*7e6ad469SVishal Kulkarni 	       struct cudbg_entity_hdr **entity_hdr)
625*7e6ad469SVishal Kulkarni {
626*7e6ad469SVishal Kulkarni 	int rc = 0;
627*7e6ad469SVishal Kulkarni 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
628*7e6ad469SVishal Kulkarni 
629*7e6ad469SVishal Kulkarni 	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
630*7e6ad469SVishal Kulkarni 		return CUDBG_STATUS_SMALL_BUFF;
631*7e6ad469SVishal Kulkarni 
632*7e6ad469SVishal Kulkarni 	*entity_hdr = (struct cudbg_entity_hdr *)
633*7e6ad469SVishal Kulkarni 		      ((char *)outbuf+cudbg_hdr->hdr_len +
634*7e6ad469SVishal Kulkarni 		       (sizeof(struct cudbg_entity_hdr)*(i-1)));
635*7e6ad469SVishal Kulkarni 	return rc;
636*7e6ad469SVishal Kulkarni }
637*7e6ad469SVishal Kulkarni 
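/* Dump the RSS lookup table. Like the other collect_* handlers, this
 * grabs a scratch buffer, reads the data, then writes a compression
 * header followed by the payload into dbg_buff.
 */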
638*7e6ad469SVishal Kulkarni static int
639*7e6ad469SVishal Kulkarni collect_rss(struct cudbg_init *pdbg_init,
640*7e6ad469SVishal Kulkarni 	    struct cudbg_buffer *dbg_buff,
641*7e6ad469SVishal Kulkarni 	    struct cudbg_error *cudbg_err)
642*7e6ad469SVishal Kulkarni {
643*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
644*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
645*7e6ad469SVishal Kulkarni 	u32 size;
646*7e6ad469SVishal Kulkarni 	int rc = 0;
647*7e6ad469SVishal Kulkarni 
648*7e6ad469SVishal Kulkarni 	size = RSS_NENTRIES * sizeof(u16);
649*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
650*7e6ad469SVishal Kulkarni 	if (rc)
651*7e6ad469SVishal Kulkarni 		goto err;
652*7e6ad469SVishal Kulkarni 
653*7e6ad469SVishal Kulkarni 	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
654*7e6ad469SVishal Kulkarni 	if (rc) {
655*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
656*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
657*7e6ad469SVishal Kulkarni 					 "%s(), t4_read_rss failed!, rc: %d\n",
658*7e6ad469SVishal Kulkarni 				 __func__, rc);
659*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
660*7e6ad469SVishal Kulkarni 		goto err1;
661*7e6ad469SVishal Kulkarni 	}
662*7e6ad469SVishal Kulkarni 
663*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
664*7e6ad469SVishal Kulkarni 	if (rc)
665*7e6ad469SVishal Kulkarni 		goto err1;
666*7e6ad469SVishal Kulkarni 
667*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
668*7e6ad469SVishal Kulkarni 
669*7e6ad469SVishal Kulkarni err1:
670*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
671*7e6ad469SVishal Kulkarni err:
672*7e6ad469SVishal Kulkarni 	return rc;
673*7e6ad469SVishal Kulkarni }
674*7e6ad469SVishal Kulkarni 
675*7e6ad469SVishal Kulkarni static int
676*7e6ad469SVishal Kulkarni collect_sw_state(struct cudbg_init *pdbg_init,
677*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
678*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
679*7e6ad469SVishal Kulkarni {
680*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
681*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
682*7e6ad469SVishal Kulkarni 	struct sw_state *swstate;
683*7e6ad469SVishal Kulkarni 	u32 size;
684*7e6ad469SVishal Kulkarni 	int rc = 0;
685*7e6ad469SVishal Kulkarni 
686*7e6ad469SVishal Kulkarni 	size = sizeof(struct sw_state);
687*7e6ad469SVishal Kulkarni 
688*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
689*7e6ad469SVishal Kulkarni 	if (rc)
690*7e6ad469SVishal Kulkarni 		goto err;
691*7e6ad469SVishal Kulkarni 
692*7e6ad469SVishal Kulkarni 	swstate = (struct sw_state *) scratch_buff.data;
693*7e6ad469SVishal Kulkarni 
694*7e6ad469SVishal Kulkarni 	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
695*7e6ad469SVishal Kulkarni 	snprintf((char *)swstate->caller_string,
696*7e6ad469SVishal Kulkarni 	    sizeof(swstate->caller_string), "%s", "Illumos");
697*7e6ad469SVishal Kulkarni 	swstate->os_type = 0;
698*7e6ad469SVishal Kulkarni 
699*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
700*7e6ad469SVishal Kulkarni 	if (rc)
701*7e6ad469SVishal Kulkarni 		goto err1;
702*7e6ad469SVishal Kulkarni 
703*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
704*7e6ad469SVishal Kulkarni 
705*7e6ad469SVishal Kulkarni err1:
706*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
707*7e6ad469SVishal Kulkarni err:
708*7e6ad469SVishal Kulkarni 	return rc;
709*7e6ad469SVishal Kulkarni }
710*7e6ad469SVishal Kulkarni 
711*7e6ad469SVishal Kulkarni static int
712*7e6ad469SVishal Kulkarni collect_ddp_stats(struct cudbg_init *pdbg_init,
713*7e6ad469SVishal Kulkarni 		  struct cudbg_buffer *dbg_buff,
714*7e6ad469SVishal Kulkarni 		  struct cudbg_error *cudbg_err)
715*7e6ad469SVishal Kulkarni {
716*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
717*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
718*7e6ad469SVishal Kulkarni 	struct tp_usm_stats  *tp_usm_stats_buff;
719*7e6ad469SVishal Kulkarni 	u32 size;
720*7e6ad469SVishal Kulkarni 	int rc = 0;
721*7e6ad469SVishal Kulkarni 
722*7e6ad469SVishal Kulkarni 	size = sizeof(struct tp_usm_stats);
723*7e6ad469SVishal Kulkarni 
724*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
725*7e6ad469SVishal Kulkarni 	if (rc)
726*7e6ad469SVishal Kulkarni 		goto err;
727*7e6ad469SVishal Kulkarni 
728*7e6ad469SVishal Kulkarni 	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
729*7e6ad469SVishal Kulkarni 
730*7e6ad469SVishal Kulkarni 	/* spin_lock(&padap->stats_lock);	TODO */
731*7e6ad469SVishal Kulkarni 	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
732*7e6ad469SVishal Kulkarni 	/* spin_unlock(&padap->stats_lock);	TODO */
733*7e6ad469SVishal Kulkarni 
734*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
735*7e6ad469SVishal Kulkarni 	if (rc)
736*7e6ad469SVishal Kulkarni 		goto err1;
737*7e6ad469SVishal Kulkarni 
738*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
739*7e6ad469SVishal Kulkarni 
740*7e6ad469SVishal Kulkarni err1:
741*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
742*7e6ad469SVishal Kulkarni err:
743*7e6ad469SVishal Kulkarni 	return rc;
744*7e6ad469SVishal Kulkarni }
745*7e6ad469SVishal Kulkarni 
746*7e6ad469SVishal Kulkarni static int
747*7e6ad469SVishal Kulkarni collect_ulptx_la(struct cudbg_init *pdbg_init,
748*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
749*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
750*7e6ad469SVishal Kulkarni {
751*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
752*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
753*7e6ad469SVishal Kulkarni 	struct struct_ulptx_la *ulptx_la_buff;
754*7e6ad469SVishal Kulkarni 	u32 size, i, j;
755*7e6ad469SVishal Kulkarni 	int rc = 0;
756*7e6ad469SVishal Kulkarni 
757*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_ulptx_la);
758*7e6ad469SVishal Kulkarni 
759*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
760*7e6ad469SVishal Kulkarni 	if (rc)
761*7e6ad469SVishal Kulkarni 		goto err;
762*7e6ad469SVishal Kulkarni 
763*7e6ad469SVishal Kulkarni 	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
764*7e6ad469SVishal Kulkarni 
765*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
766*7e6ad469SVishal Kulkarni 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
767*7e6ad469SVishal Kulkarni 						      A_ULP_TX_LA_RDPTR_0 +
768*7e6ad469SVishal Kulkarni 						      0x10 * i);
769*7e6ad469SVishal Kulkarni 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
770*7e6ad469SVishal Kulkarni 						      A_ULP_TX_LA_WRPTR_0 +
771*7e6ad469SVishal Kulkarni 						      0x10 * i);
772*7e6ad469SVishal Kulkarni 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
773*7e6ad469SVishal Kulkarni 						       A_ULP_TX_LA_RDDATA_0 +
774*7e6ad469SVishal Kulkarni 						       0x10 * i);
775*7e6ad469SVishal Kulkarni 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
776*7e6ad469SVishal Kulkarni 			ulptx_la_buff->rd_data[i][j] =
777*7e6ad469SVishal Kulkarni 				t4_read_reg(padap,
778*7e6ad469SVishal Kulkarni 					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
779*7e6ad469SVishal Kulkarni 		}
780*7e6ad469SVishal Kulkarni 	}
781*7e6ad469SVishal Kulkarni 
782*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
783*7e6ad469SVishal Kulkarni 	if (rc)
784*7e6ad469SVishal Kulkarni 		goto err1;
785*7e6ad469SVishal Kulkarni 
786*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
787*7e6ad469SVishal Kulkarni 
788*7e6ad469SVishal Kulkarni err1:
789*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
790*7e6ad469SVishal Kulkarni err:
791*7e6ad469SVishal Kulkarni 	return rc;
792*7e6ad469SVishal Kulkarni 
793*7e6ad469SVishal Kulkarni }
794*7e6ad469SVishal Kulkarni 
795*7e6ad469SVishal Kulkarni static int
796*7e6ad469SVishal Kulkarni collect_ulprx_la(struct cudbg_init *pdbg_init,
797*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
798*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
799*7e6ad469SVishal Kulkarni {
800*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
801*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
802*7e6ad469SVishal Kulkarni 	struct struct_ulprx_la *ulprx_la_buff;
803*7e6ad469SVishal Kulkarni 	u32 size;
804*7e6ad469SVishal Kulkarni 	int rc = 0;
805*7e6ad469SVishal Kulkarni 
806*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_ulprx_la);
807*7e6ad469SVishal Kulkarni 
808*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
809*7e6ad469SVishal Kulkarni 	if (rc)
810*7e6ad469SVishal Kulkarni 		goto err;
811*7e6ad469SVishal Kulkarni 
812*7e6ad469SVishal Kulkarni 	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
813*7e6ad469SVishal Kulkarni 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
814*7e6ad469SVishal Kulkarni 	ulprx_la_buff->size = ULPRX_LA_SIZE;
815*7e6ad469SVishal Kulkarni 
816*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
817*7e6ad469SVishal Kulkarni 	if (rc)
818*7e6ad469SVishal Kulkarni 		goto err1;
819*7e6ad469SVishal Kulkarni 
820*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
821*7e6ad469SVishal Kulkarni 
822*7e6ad469SVishal Kulkarni err1:
823*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
824*7e6ad469SVishal Kulkarni err:
825*7e6ad469SVishal Kulkarni 	return rc;
826*7e6ad469SVishal Kulkarni }
827*7e6ad469SVishal Kulkarni 
828*7e6ad469SVishal Kulkarni static int
829*7e6ad469SVishal Kulkarni collect_cpl_stats(struct cudbg_init *pdbg_init,
830*7e6ad469SVishal Kulkarni 		  struct cudbg_buffer *dbg_buff,
831*7e6ad469SVishal Kulkarni 		  struct cudbg_error *cudbg_err)
832*7e6ad469SVishal Kulkarni {
833*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
834*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
835*7e6ad469SVishal Kulkarni 	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
836*7e6ad469SVishal Kulkarni 	u32 size;
837*7e6ad469SVishal Kulkarni 	int rc = 0;
838*7e6ad469SVishal Kulkarni 
839*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_tp_cpl_stats);
840*7e6ad469SVishal Kulkarni 
841*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
842*7e6ad469SVishal Kulkarni 	if (rc)
843*7e6ad469SVishal Kulkarni 		goto err;
844*7e6ad469SVishal Kulkarni 
845*7e6ad469SVishal Kulkarni 	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
846*7e6ad469SVishal Kulkarni 	tp_cpl_stats_buff->nchan = padap->params.arch.nchan;
847*7e6ad469SVishal Kulkarni 
848*7e6ad469SVishal Kulkarni 	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
849*7e6ad469SVishal Kulkarni 
850*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
851*7e6ad469SVishal Kulkarni 	if (rc)
852*7e6ad469SVishal Kulkarni 		goto err1;
853*7e6ad469SVishal Kulkarni 
854*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
855*7e6ad469SVishal Kulkarni 
856*7e6ad469SVishal Kulkarni err1:
857*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
858*7e6ad469SVishal Kulkarni err:
859*7e6ad469SVishal Kulkarni 	return rc;
860*7e6ad469SVishal Kulkarni }
861*7e6ad469SVishal Kulkarni 
862*7e6ad469SVishal Kulkarni static int
863*7e6ad469SVishal Kulkarni collect_wc_stats(struct cudbg_init *pdbg_init,
864*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
865*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
866*7e6ad469SVishal Kulkarni {
867*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
868*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
869*7e6ad469SVishal Kulkarni 	struct struct_wc_stats *wc_stats_buff;
870*7e6ad469SVishal Kulkarni 	u32 val1;
871*7e6ad469SVishal Kulkarni 	u32 val2;
872*7e6ad469SVishal Kulkarni 	u32 size;
873*7e6ad469SVishal Kulkarni 
874*7e6ad469SVishal Kulkarni 	int rc = 0;
875*7e6ad469SVishal Kulkarni 
876*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_wc_stats);
877*7e6ad469SVishal Kulkarni 
878*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
879*7e6ad469SVishal Kulkarni 	if (rc)
880*7e6ad469SVishal Kulkarni 		goto err;
881*7e6ad469SVishal Kulkarni 
882*7e6ad469SVishal Kulkarni 	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
883*7e6ad469SVishal Kulkarni 
884*7e6ad469SVishal Kulkarni 	if (!is_t4(padap->params.chip)) {
885*7e6ad469SVishal Kulkarni 		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
886*7e6ad469SVishal Kulkarni 		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
887*7e6ad469SVishal Kulkarni 		wc_stats_buff->wr_cl_success = val1 - val2;
888*7e6ad469SVishal Kulkarni 		wc_stats_buff->wr_cl_fail = val2;
889*7e6ad469SVishal Kulkarni 	} else {
890*7e6ad469SVishal Kulkarni 		wc_stats_buff->wr_cl_success = 0;
891*7e6ad469SVishal Kulkarni 		wc_stats_buff->wr_cl_fail = 0;
892*7e6ad469SVishal Kulkarni 	}
893*7e6ad469SVishal Kulkarni 
894*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
895*7e6ad469SVishal Kulkarni 	if (rc)
896*7e6ad469SVishal Kulkarni 		goto err1;
897*7e6ad469SVishal Kulkarni 
898*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
899*7e6ad469SVishal Kulkarni err1:
900*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
901*7e6ad469SVishal Kulkarni err:
902*7e6ad469SVishal Kulkarni 	return rc;
903*7e6ad469SVishal Kulkarni }
904*7e6ad469SVishal Kulkarni 
905*7e6ad469SVishal Kulkarni static int
906*7e6ad469SVishal Kulkarni mem_desc_cmp(const void *a, const void *b)
907*7e6ad469SVishal Kulkarni {
908*7e6ad469SVishal Kulkarni 	return ((const struct struct_mem_desc *)a)->base -
909*7e6ad469SVishal Kulkarni 		((const struct struct_mem_desc *)b)->base;
910*7e6ad469SVishal Kulkarni }
911*7e6ad469SVishal Kulkarni 
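/* Build a snapshot of the adapter memory layout: available EDRAM and
 * external memory ranges plus the hardware region base/limit registers,
 * sorted by base address.
 */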
912*7e6ad469SVishal Kulkarni static int
913*7e6ad469SVishal Kulkarni fill_meminfo(struct adapter *padap,
914*7e6ad469SVishal Kulkarni 	     struct struct_meminfo *meminfo_buff)
915*7e6ad469SVishal Kulkarni {
916*7e6ad469SVishal Kulkarni 	struct struct_mem_desc *md;
917*7e6ad469SVishal Kulkarni 	u32 size, lo, hi;
918*7e6ad469SVishal Kulkarni 	u32 used, alloc;
919*7e6ad469SVishal Kulkarni 	int n, i, rc = 0;
920*7e6ad469SVishal Kulkarni 
921*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_meminfo);
922*7e6ad469SVishal Kulkarni 
923*7e6ad469SVishal Kulkarni 	memset(meminfo_buff->avail, 0,
924*7e6ad469SVishal Kulkarni 	       ARRAY_SIZE(meminfo_buff->avail) *
925*7e6ad469SVishal Kulkarni 	       sizeof(struct struct_mem_desc));
926*7e6ad469SVishal Kulkarni 	memset(meminfo_buff->mem, 0,
927*7e6ad469SVishal Kulkarni 	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
928*7e6ad469SVishal Kulkarni 	md  = meminfo_buff->mem;
929*7e6ad469SVishal Kulkarni 
930*7e6ad469SVishal Kulkarni 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
931*7e6ad469SVishal Kulkarni 		meminfo_buff->mem[i].limit = 0;
932*7e6ad469SVishal Kulkarni 		meminfo_buff->mem[i].idx = i;
933*7e6ad469SVishal Kulkarni 	}
934*7e6ad469SVishal Kulkarni 
935*7e6ad469SVishal Kulkarni 	i = 0;
936*7e6ad469SVishal Kulkarni 
937*7e6ad469SVishal Kulkarni 	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
938*7e6ad469SVishal Kulkarni 
939*7e6ad469SVishal Kulkarni 	if (lo & F_EDRAM0_ENABLE) {
940*7e6ad469SVishal Kulkarni 		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
941*7e6ad469SVishal Kulkarni 		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
942*7e6ad469SVishal Kulkarni 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
943*7e6ad469SVishal Kulkarni 					       (G_EDRAM0_SIZE(hi) << 20);
944*7e6ad469SVishal Kulkarni 		meminfo_buff->avail[i].idx = 0;
945*7e6ad469SVishal Kulkarni 		i++;
946*7e6ad469SVishal Kulkarni 	}
947*7e6ad469SVishal Kulkarni 
948*7e6ad469SVishal Kulkarni 	if (lo & F_EDRAM1_ENABLE) {
949*7e6ad469SVishal Kulkarni 		hi =  t4_read_reg(padap, A_MA_EDRAM1_BAR);
950*7e6ad469SVishal Kulkarni 		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
951*7e6ad469SVishal Kulkarni 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
952*7e6ad469SVishal Kulkarni 					       (G_EDRAM1_SIZE(hi) << 20);
953*7e6ad469SVishal Kulkarni 		meminfo_buff->avail[i].idx = 1;
954*7e6ad469SVishal Kulkarni 		i++;
955*7e6ad469SVishal Kulkarni 	}
956*7e6ad469SVishal Kulkarni 
957*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip)) {
958*7e6ad469SVishal Kulkarni 		if (lo & F_EXT_MEM0_ENABLE) {
959*7e6ad469SVishal Kulkarni 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
960*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
961*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].limit =
962*7e6ad469SVishal Kulkarni 				meminfo_buff->avail[i].base +
963*7e6ad469SVishal Kulkarni 				(G_EXT_MEM_SIZE(hi) << 20);
964*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].idx = 3;
965*7e6ad469SVishal Kulkarni 			i++;
966*7e6ad469SVishal Kulkarni 		}
967*7e6ad469SVishal Kulkarni 
968*7e6ad469SVishal Kulkarni 		if (lo & F_EXT_MEM1_ENABLE) {
969*7e6ad469SVishal Kulkarni 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
970*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
971*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].limit =
972*7e6ad469SVishal Kulkarni 				meminfo_buff->avail[i].base +
973*7e6ad469SVishal Kulkarni 				(G_EXT_MEM1_SIZE(hi) << 20);
974*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].idx = 4;
975*7e6ad469SVishal Kulkarni 			i++;
976*7e6ad469SVishal Kulkarni 		}
977*7e6ad469SVishal Kulkarni 	} else if (is_t6(padap->params.chip)) {
978*7e6ad469SVishal Kulkarni 		if (lo & F_EXT_MEM_ENABLE) {
979*7e6ad469SVishal Kulkarni 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
980*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
981*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].limit =
982*7e6ad469SVishal Kulkarni 				meminfo_buff->avail[i].base +
983*7e6ad469SVishal Kulkarni 				(G_EXT_MEM_SIZE(hi) << 20);
984*7e6ad469SVishal Kulkarni 			meminfo_buff->avail[i].idx = 2;
985*7e6ad469SVishal Kulkarni 			i++;
986*7e6ad469SVishal Kulkarni 		}
987*7e6ad469SVishal Kulkarni 	}
988*7e6ad469SVishal Kulkarni 
989*7e6ad469SVishal Kulkarni 	if (!i) {				   /* no memory available */
990*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
991*7e6ad469SVishal Kulkarni 		goto err;
992*7e6ad469SVishal Kulkarni 	}
993*7e6ad469SVishal Kulkarni 
994*7e6ad469SVishal Kulkarni 	meminfo_buff->avail_c = i;
995*7e6ad469SVishal Kulkarni 	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
996*7e6ad469SVishal Kulkarni 	    mem_desc_cmp, NULL);
997*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
998*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
999*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
1000*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
1001*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
1002*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
1003*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
1004*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
1005*7e6ad469SVishal Kulkarni 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
1006*7e6ad469SVishal Kulkarni 
1007*7e6ad469SVishal Kulkarni 	/* the next few have explicit upper bounds */
1008*7e6ad469SVishal Kulkarni 	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
1009*7e6ad469SVishal Kulkarni 	md->limit = md->base - 1 +
1010*7e6ad469SVishal Kulkarni 		    t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE) *
1011*7e6ad469SVishal Kulkarni 		    G_PMTXMAXPAGE(t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE));
1015*7e6ad469SVishal Kulkarni 	md++;
1016*7e6ad469SVishal Kulkarni 
1017*7e6ad469SVishal Kulkarni 	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
1018*7e6ad469SVishal Kulkarni 	md->limit = md->base - 1 +
1019*7e6ad469SVishal Kulkarni 		    t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) *
1020*7e6ad469SVishal Kulkarni 		    G_PMRXMAXPAGE(t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE));
1024*7e6ad469SVishal Kulkarni 	md++;
1025*7e6ad469SVishal Kulkarni 	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
1026*7e6ad469SVishal Kulkarni 		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
1027*7e6ad469SVishal Kulkarni 			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
1028*7e6ad469SVishal Kulkarni 			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
1029*7e6ad469SVishal Kulkarni 		} else {
1030*7e6ad469SVishal Kulkarni 			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
1031*7e6ad469SVishal Kulkarni 			md->base = t4_read_reg(padap,
1032*7e6ad469SVishal Kulkarni 					       A_LE_DB_HASH_TBL_BASE_ADDR);
1033*7e6ad469SVishal Kulkarni 		}
1034*7e6ad469SVishal Kulkarni 		md->limit = 0;
1035*7e6ad469SVishal Kulkarni 	} else {
1036*7e6ad469SVishal Kulkarni 		md->base = 0;
1037*7e6ad469SVishal Kulkarni 		md->idx = ARRAY_SIZE(region);  /* hide it */
1038*7e6ad469SVishal Kulkarni 	}
1039*7e6ad469SVishal Kulkarni 	md++;
1040*7e6ad469SVishal Kulkarni #define ulp_region(reg) \
1041*7e6ad469SVishal Kulkarni 	{\
1042*7e6ad469SVishal Kulkarni 		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
1043*7e6ad469SVishal Kulkarni 		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
1044*7e6ad469SVishal Kulkarni 	}
1045*7e6ad469SVishal Kulkarni 
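	/*
	 * For example, ulp_region(RX_ISCSI) expands to reads of
	 * A_ULP_RX_ISCSI_LLIMIT and A_ULP_RX_ISCSI_ULIMIT, recording the
	 * iSCSI region's base and limit.
	 */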
1046*7e6ad469SVishal Kulkarni 	ulp_region(RX_ISCSI);
1047*7e6ad469SVishal Kulkarni 	ulp_region(RX_TDDP);
1048*7e6ad469SVishal Kulkarni 	ulp_region(TX_TPT);
1049*7e6ad469SVishal Kulkarni 	ulp_region(RX_STAG);
1050*7e6ad469SVishal Kulkarni 	ulp_region(RX_RQ);
1051*7e6ad469SVishal Kulkarni 	ulp_region(RX_RQUDP);
1052*7e6ad469SVishal Kulkarni 	ulp_region(RX_PBL);
1053*7e6ad469SVishal Kulkarni 	ulp_region(TX_PBL);
1054*7e6ad469SVishal Kulkarni #undef ulp_region
1055*7e6ad469SVishal Kulkarni 	md->base = 0;
1056*7e6ad469SVishal Kulkarni 	md->idx = ARRAY_SIZE(region);
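	/*
	 * On T5 and later chips the SGE doorbell VFIFO may occupy a region;
	 * record it only when a non-zero size is reported.
	 */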
1057*7e6ad469SVishal Kulkarni 	if (!is_t4(padap->params.chip)) {
1058*7e6ad469SVishal Kulkarni 		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
1059*7e6ad469SVishal Kulkarni 		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
1060*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
1061*7e6ad469SVishal Kulkarni 			if (sge_ctrl & F_VFIFO_ENABLE)
1062*7e6ad469SVishal Kulkarni 				size = G_DBVFIFO_SIZE(fifo_size);
1063*7e6ad469SVishal Kulkarni 		} else
1064*7e6ad469SVishal Kulkarni 			size = G_T6_DBVFIFO_SIZE(fifo_size);
1065*7e6ad469SVishal Kulkarni 
1066*7e6ad469SVishal Kulkarni 		if (size) {
1067*7e6ad469SVishal Kulkarni 			md->base = G_BASEADDR(t4_read_reg(padap,
1068*7e6ad469SVishal Kulkarni 							  A_SGE_DBVFIFO_BADDR));
1069*7e6ad469SVishal Kulkarni 			md->limit = md->base + (size << 2) - 1;
1070*7e6ad469SVishal Kulkarni 		}
1071*7e6ad469SVishal Kulkarni 	}
1072*7e6ad469SVishal Kulkarni 
1073*7e6ad469SVishal Kulkarni 	md++;
1074*7e6ad469SVishal Kulkarni 
1075*7e6ad469SVishal Kulkarni 	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
1076*7e6ad469SVishal Kulkarni 	md->limit = 0;
1077*7e6ad469SVishal Kulkarni 	md++;
1078*7e6ad469SVishal Kulkarni 	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
1079*7e6ad469SVishal Kulkarni 	md->limit = 0;
1080*7e6ad469SVishal Kulkarni 	md++;
1081*7e6ad469SVishal Kulkarni #ifndef __NO_DRIVER_OCQ_SUPPORT__
1082*7e6ad469SVishal Kulkarni 	/*md->base = padap->vres.ocq.start;*/
1083*7e6ad469SVishal Kulkarni 	/*if (adap->vres.ocq.size)*/
1084*7e6ad469SVishal Kulkarni 	/*	  md->limit = md->base + adap->vres.ocq.size - 1;*/
1085*7e6ad469SVishal Kulkarni 	/*else*/
1086*7e6ad469SVishal Kulkarni 	md->idx = ARRAY_SIZE(region);  /* hide it */
1087*7e6ad469SVishal Kulkarni 	md++;
1088*7e6ad469SVishal Kulkarni #endif
1089*7e6ad469SVishal Kulkarni 
1090*7e6ad469SVishal Kulkarni 	/* add any address-space holes, there can be up to 3 */
1091*7e6ad469SVishal Kulkarni 	for (n = 0; n < i - 1; n++)
1092*7e6ad469SVishal Kulkarni 		if (meminfo_buff->avail[n].limit <
1093*7e6ad469SVishal Kulkarni 		    meminfo_buff->avail[n + 1].base)
1094*7e6ad469SVishal Kulkarni 			(md++)->base = meminfo_buff->avail[n].limit;
1095*7e6ad469SVishal Kulkarni 
1096*7e6ad469SVishal Kulkarni 	if (meminfo_buff->avail[n].limit)
1097*7e6ad469SVishal Kulkarni 		(md++)->base = meminfo_buff->avail[n].limit;
1098*7e6ad469SVishal Kulkarni 
1099*7e6ad469SVishal Kulkarni 	n = (int) (md - meminfo_buff->mem);
1100*7e6ad469SVishal Kulkarni 	meminfo_buff->mem_c = n;
1101*7e6ad469SVishal Kulkarni 
1102*7e6ad469SVishal Kulkarni 	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1103*7e6ad469SVishal Kulkarni 	    mem_desc_cmp, NULL);
1104*7e6ad469SVishal Kulkarni 
1105*7e6ad469SVishal Kulkarni 	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1106*7e6ad469SVishal Kulkarni 	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1107*7e6ad469SVishal Kulkarni 	meminfo_buff->up_ram_lo = lo;
1108*7e6ad469SVishal Kulkarni 	meminfo_buff->up_ram_hi = hi;
1109*7e6ad469SVishal Kulkarni 
1110*7e6ad469SVishal Kulkarni 	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1111*7e6ad469SVishal Kulkarni 	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1112*7e6ad469SVishal Kulkarni 	meminfo_buff->up_extmem2_lo = lo;
1113*7e6ad469SVishal Kulkarni 	meminfo_buff->up_extmem2_hi = hi;
1114*7e6ad469SVishal Kulkarni 
1115*7e6ad469SVishal Kulkarni 	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1116*7e6ad469SVishal Kulkarni 	meminfo_buff->rx_pages_data[0] =  G_PMRXMAXPAGE(lo);
1117*7e6ad469SVishal Kulkarni 	meminfo_buff->rx_pages_data[1] =
1118*7e6ad469SVishal Kulkarni 		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1119*7e6ad469SVishal Kulkarni 	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;
1120*7e6ad469SVishal Kulkarni 
1121*7e6ad469SVishal Kulkarni 	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1122*7e6ad469SVishal Kulkarni 	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1123*7e6ad469SVishal Kulkarni 	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1124*7e6ad469SVishal Kulkarni 	meminfo_buff->tx_pages_data[1] =
1125*7e6ad469SVishal Kulkarni 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1126*7e6ad469SVishal Kulkarni 	meminfo_buff->tx_pages_data[2] =
1127*7e6ad469SVishal Kulkarni 		hi >= (1 << 20) ? 'M' : 'K';
1128*7e6ad469SVishal Kulkarni 	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1129*7e6ad469SVishal Kulkarni 
1130*7e6ad469SVishal Kulkarni 	for (i = 0; i < 4; i++) {
1131*7e6ad469SVishal Kulkarni 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
1132*7e6ad469SVishal Kulkarni 			lo = t4_read_reg(padap,
1133*7e6ad469SVishal Kulkarni 					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1134*7e6ad469SVishal Kulkarni 		else
1135*7e6ad469SVishal Kulkarni 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1136*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
1137*7e6ad469SVishal Kulkarni 			used = G_T5_USED(lo);
1138*7e6ad469SVishal Kulkarni 			alloc = G_T5_ALLOC(lo);
1139*7e6ad469SVishal Kulkarni 		} else {
1140*7e6ad469SVishal Kulkarni 			used = G_USED(lo);
1141*7e6ad469SVishal Kulkarni 			alloc = G_ALLOC(lo);
1142*7e6ad469SVishal Kulkarni 		}
1143*7e6ad469SVishal Kulkarni 		meminfo_buff->port_used[i] = used;
1144*7e6ad469SVishal Kulkarni 		meminfo_buff->port_alloc[i] = alloc;
1145*7e6ad469SVishal Kulkarni 	}
1146*7e6ad469SVishal Kulkarni 
1147*7e6ad469SVishal Kulkarni 	for (i = 0; i < padap->params.arch.nchan; i++) {
1148*7e6ad469SVishal Kulkarni 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
1149*7e6ad469SVishal Kulkarni 			lo = t4_read_reg(padap,
1150*7e6ad469SVishal Kulkarni 					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1151*7e6ad469SVishal Kulkarni 		else
1152*7e6ad469SVishal Kulkarni 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1153*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
1154*7e6ad469SVishal Kulkarni 			used = G_T5_USED(lo);
1155*7e6ad469SVishal Kulkarni 			alloc = G_T5_ALLOC(lo);
1156*7e6ad469SVishal Kulkarni 		} else {
1157*7e6ad469SVishal Kulkarni 			used = G_USED(lo);
1158*7e6ad469SVishal Kulkarni 			alloc = G_ALLOC(lo);
1159*7e6ad469SVishal Kulkarni 		}
1160*7e6ad469SVishal Kulkarni 		meminfo_buff->loopback_used[i] = used;
1161*7e6ad469SVishal Kulkarni 		meminfo_buff->loopback_alloc[i] = alloc;
1162*7e6ad469SVishal Kulkarni 	}
1163*7e6ad469SVishal Kulkarni err:
1164*7e6ad469SVishal Kulkarni 	return rc;
1165*7e6ad469SVishal Kulkarni }
1166*7e6ad469SVishal Kulkarni 
1167*7e6ad469SVishal Kulkarni static int
1168*7e6ad469SVishal Kulkarni collect_meminfo(struct cudbg_init *pdbg_init,
1169*7e6ad469SVishal Kulkarni 		struct cudbg_buffer *dbg_buff,
1170*7e6ad469SVishal Kulkarni 		struct cudbg_error *cudbg_err)
1171*7e6ad469SVishal Kulkarni {
1172*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1173*7e6ad469SVishal Kulkarni 	struct struct_meminfo *meminfo_buff;
1174*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1175*7e6ad469SVishal Kulkarni 	int rc = 0;
1176*7e6ad469SVishal Kulkarni 	u32 size;
1177*7e6ad469SVishal Kulkarni 
1178*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_meminfo);
1179*7e6ad469SVishal Kulkarni 
1180*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1181*7e6ad469SVishal Kulkarni 	if (rc)
1182*7e6ad469SVishal Kulkarni 		goto err;
1183*7e6ad469SVishal Kulkarni 
1184*7e6ad469SVishal Kulkarni 	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1185*7e6ad469SVishal Kulkarni 
1186*7e6ad469SVishal Kulkarni 	rc = fill_meminfo(padap, meminfo_buff);
1187*7e6ad469SVishal Kulkarni 	if (rc)
1188*7e6ad469SVishal Kulkarni 		goto err;
1189*7e6ad469SVishal Kulkarni 
1190*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1191*7e6ad469SVishal Kulkarni 	if (rc)
1192*7e6ad469SVishal Kulkarni 		goto err1;
1193*7e6ad469SVishal Kulkarni 
1194*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1195*7e6ad469SVishal Kulkarni err1:
1196*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1197*7e6ad469SVishal Kulkarni err:
1198*7e6ad469SVishal Kulkarni 	return rc;
1199*7e6ad469SVishal Kulkarni }
1200*7e6ad469SVishal Kulkarni 
1201*7e6ad469SVishal Kulkarni static int
1202*7e6ad469SVishal Kulkarni collect_lb_stats(struct cudbg_init *pdbg_init,
1203*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
1204*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
1205*7e6ad469SVishal Kulkarni {
1206*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1207*7e6ad469SVishal Kulkarni 	struct struct_lb_stats *lb_stats_buff;
1208*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1209*7e6ad469SVishal Kulkarni 	struct lb_port_stats *tmp_stats;
1210*7e6ad469SVishal Kulkarni 	u32 i, n, size;
1211*7e6ad469SVishal Kulkarni 	int rc = 0;
1212*7e6ad469SVishal Kulkarni 
1213*7e6ad469SVishal Kulkarni 	rc = padap->params.nports;
1214*7e6ad469SVishal Kulkarni 	if (rc < 0)
1215*7e6ad469SVishal Kulkarni 		goto err;
1216*7e6ad469SVishal Kulkarni 
1217*7e6ad469SVishal Kulkarni 	n = rc;
1218*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_lb_stats) +
1219*7e6ad469SVishal Kulkarni 	       n * sizeof(struct lb_port_stats);
1220*7e6ad469SVishal Kulkarni 
1221*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1222*7e6ad469SVishal Kulkarni 	if (rc)
1223*7e6ad469SVishal Kulkarni 		goto err;
1224*7e6ad469SVishal Kulkarni 
1225*7e6ad469SVishal Kulkarni 	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1226*7e6ad469SVishal Kulkarni 
1227*7e6ad469SVishal Kulkarni 	lb_stats_buff->nchan = n;
1228*7e6ad469SVishal Kulkarni 	tmp_stats = lb_stats_buff->s;
1229*7e6ad469SVishal Kulkarni 
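	/* Gather loopback port stats two channels per iteration. */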
1230*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i += 2, tmp_stats += 2) {
1231*7e6ad469SVishal Kulkarni 		t4_get_lb_stats(padap, i, tmp_stats);
1232*7e6ad469SVishal Kulkarni 		t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1233*7e6ad469SVishal Kulkarni 	}
1234*7e6ad469SVishal Kulkarni 
1235*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1236*7e6ad469SVishal Kulkarni 	if (rc)
1237*7e6ad469SVishal Kulkarni 		goto err1;
1238*7e6ad469SVishal Kulkarni 
1239*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1240*7e6ad469SVishal Kulkarni err1:
1241*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1242*7e6ad469SVishal Kulkarni err:
1243*7e6ad469SVishal Kulkarni 	return rc;
1244*7e6ad469SVishal Kulkarni }
1245*7e6ad469SVishal Kulkarni 
1246*7e6ad469SVishal Kulkarni static int
1247*7e6ad469SVishal Kulkarni collect_rdma_stats(struct cudbg_init *pdbg_init,
1248*7e6ad469SVishal Kulkarni 		   struct cudbg_buffer *dbg_buff,
1249*7e6ad469SVishal Kulkarni 		   struct cudbg_error *cudbg_err)
1250*7e6ad469SVishal Kulkarni {
1251*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1252*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1253*7e6ad469SVishal Kulkarni 	struct tp_rdma_stats *rdma_stats_buff;
1254*7e6ad469SVishal Kulkarni 	u32 size;
1255*7e6ad469SVishal Kulkarni 	int rc = 0;
1256*7e6ad469SVishal Kulkarni 
1257*7e6ad469SVishal Kulkarni 	size = sizeof(struct tp_rdma_stats);
1258*7e6ad469SVishal Kulkarni 
1259*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1260*7e6ad469SVishal Kulkarni 	if (rc)
1261*7e6ad469SVishal Kulkarni 		goto err;
1262*7e6ad469SVishal Kulkarni 
1263*7e6ad469SVishal Kulkarni 	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1264*7e6ad469SVishal Kulkarni 
1265*7e6ad469SVishal Kulkarni 	/* spin_lock(&padap->stats_lock);	TODO*/
1266*7e6ad469SVishal Kulkarni 	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1267*7e6ad469SVishal Kulkarni 	/* spin_unlock(&padap->stats_lock);	TODO*/
1268*7e6ad469SVishal Kulkarni 
1269*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1270*7e6ad469SVishal Kulkarni 	if (rc)
1271*7e6ad469SVishal Kulkarni 		goto err1;
1272*7e6ad469SVishal Kulkarni 
1273*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1274*7e6ad469SVishal Kulkarni err1:
1275*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1276*7e6ad469SVishal Kulkarni err:
1277*7e6ad469SVishal Kulkarni 	return rc;
1278*7e6ad469SVishal Kulkarni }
1279*7e6ad469SVishal Kulkarni 
1280*7e6ad469SVishal Kulkarni static int
1281*7e6ad469SVishal Kulkarni collect_clk_info(struct cudbg_init *pdbg_init,
1282*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
1283*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
1284*7e6ad469SVishal Kulkarni {
1285*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1286*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1287*7e6ad469SVishal Kulkarni 	struct struct_clk_info *clk_info_buff;
1288*7e6ad469SVishal Kulkarni 	u64 tp_tick_us;
1289*7e6ad469SVishal Kulkarni 	int size;
1290*7e6ad469SVishal Kulkarni 	int rc = 0;
1291*7e6ad469SVishal Kulkarni 
1292*7e6ad469SVishal Kulkarni 	if (!padap->params.vpd.cclk) {
1293*7e6ad469SVishal Kulkarni 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1294*7e6ad469SVishal Kulkarni 		goto err;
1295*7e6ad469SVishal Kulkarni 	}
1296*7e6ad469SVishal Kulkarni 
1297*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_clk_info);
1298*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1299*7e6ad469SVishal Kulkarni 	if (rc)
1300*7e6ad469SVishal Kulkarni 		goto err;
1301*7e6ad469SVishal Kulkarni 
1302*7e6ad469SVishal Kulkarni 	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
1303*7e6ad469SVishal Kulkarni 
1304*7e6ad469SVishal Kulkarni 	/* in ps */
1305*7e6ad469SVishal Kulkarni 	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk;
1306*7e6ad469SVishal Kulkarni 	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
1307*7e6ad469SVishal Kulkarni 	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
1308*7e6ad469SVishal Kulkarni 	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
1309*7e6ad469SVishal Kulkarni 	/* in us */
1310*7e6ad469SVishal Kulkarni 	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1311*7e6ad469SVishal Kulkarni 	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
1312*7e6ad469SVishal Kulkarni 				      clk_info_buff->dack_re) / 1000000) *
1313*7e6ad469SVishal Kulkarni 				     t4_read_reg(padap, A_TP_DACK_TIMER);
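	/*
	 * Example with hypothetical numbers: vpd.cclk is evidently in kHz,
	 * so a 250 MHz core clock (cclk = 250000) yields cclk_ps = 4000.
	 * With a timer resolution field of 10, tp_tick_us works out to
	 * (4000 << 10) / 1000000 = 4 us per TP tick (integer division),
	 * which scales the raw TP timer registers read below into
	 * microseconds.
	 */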
1314*7e6ad469SVishal Kulkarni 
1315*7e6ad469SVishal Kulkarni 	clk_info_buff->retransmit_min =
1316*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
1317*7e6ad469SVishal Kulkarni 	clk_info_buff->retransmit_max =
1318*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
1319*7e6ad469SVishal Kulkarni 
1320*7e6ad469SVishal Kulkarni 	clk_info_buff->persist_timer_min =
1321*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
1322*7e6ad469SVishal Kulkarni 	clk_info_buff->persist_timer_max =
1323*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
1324*7e6ad469SVishal Kulkarni 
1325*7e6ad469SVishal Kulkarni 	clk_info_buff->keepalive_idle_timer =
1326*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
1327*7e6ad469SVishal Kulkarni 	clk_info_buff->keepalive_interval =
1328*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
1329*7e6ad469SVishal Kulkarni 
1330*7e6ad469SVishal Kulkarni 	clk_info_buff->initial_srtt =
1331*7e6ad469SVishal Kulkarni 		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
1332*7e6ad469SVishal Kulkarni 	clk_info_buff->finwait2_timer =
1333*7e6ad469SVishal Kulkarni 		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
1334*7e6ad469SVishal Kulkarni 
1335*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1336*7e6ad469SVishal Kulkarni 
1337*7e6ad469SVishal Kulkarni 	if (rc)
1338*7e6ad469SVishal Kulkarni 		goto err1;
1339*7e6ad469SVishal Kulkarni 
1340*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1341*7e6ad469SVishal Kulkarni err1:
1342*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1343*7e6ad469SVishal Kulkarni err:
1344*7e6ad469SVishal Kulkarni 	return rc;
1345*7e6ad469SVishal Kulkarni 
1346*7e6ad469SVishal Kulkarni }
1347*7e6ad469SVishal Kulkarni 
1348*7e6ad469SVishal Kulkarni static int
1349*7e6ad469SVishal Kulkarni collect_macstats(struct cudbg_init *pdbg_init,
1350*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
1351*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
1352*7e6ad469SVishal Kulkarni {
1353*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1354*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1355*7e6ad469SVishal Kulkarni 	struct struct_mac_stats_rev1 *mac_stats_buff;
1356*7e6ad469SVishal Kulkarni 	u32 i, n, size;
1357*7e6ad469SVishal Kulkarni 	int rc = 0;
1358*7e6ad469SVishal Kulkarni 
1359*7e6ad469SVishal Kulkarni 	rc = padap->params.nports;
1360*7e6ad469SVishal Kulkarni 	if (rc < 0)
1361*7e6ad469SVishal Kulkarni 		goto err;
1362*7e6ad469SVishal Kulkarni 
1363*7e6ad469SVishal Kulkarni 	n = rc;
1364*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_mac_stats_rev1);
1365*7e6ad469SVishal Kulkarni 
1366*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1367*7e6ad469SVishal Kulkarni 	if (rc)
1368*7e6ad469SVishal Kulkarni 		goto err;
1369*7e6ad469SVishal Kulkarni 
1370*7e6ad469SVishal Kulkarni 	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
1371*7e6ad469SVishal Kulkarni 
1372*7e6ad469SVishal Kulkarni 	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1373*7e6ad469SVishal Kulkarni 	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
1374*7e6ad469SVishal Kulkarni 	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
1375*7e6ad469SVishal Kulkarni 				       sizeof(struct cudbg_ver_hdr);
1376*7e6ad469SVishal Kulkarni 
1377*7e6ad469SVishal Kulkarni 	mac_stats_buff->port_count = n;
1378*7e6ad469SVishal Kulkarni 	for (i = 0; i <  mac_stats_buff->port_count; i++)
1379*7e6ad469SVishal Kulkarni 		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
1380*7e6ad469SVishal Kulkarni 
1381*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1382*7e6ad469SVishal Kulkarni 	if (rc)
1383*7e6ad469SVishal Kulkarni 		goto err1;
1384*7e6ad469SVishal Kulkarni 
1385*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1386*7e6ad469SVishal Kulkarni err1:
1387*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1388*7e6ad469SVishal Kulkarni err:
1389*7e6ad469SVishal Kulkarni 	return rc;
1390*7e6ad469SVishal Kulkarni }
1391*7e6ad469SVishal Kulkarni 
1392*7e6ad469SVishal Kulkarni static int
1393*7e6ad469SVishal Kulkarni collect_cim_pif_la(struct cudbg_init *pdbg_init,
1394*7e6ad469SVishal Kulkarni 		   struct cudbg_buffer *dbg_buff,
1395*7e6ad469SVishal Kulkarni 		   struct cudbg_error *cudbg_err)
1396*7e6ad469SVishal Kulkarni {
1397*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1398*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1399*7e6ad469SVishal Kulkarni 	struct cim_pif_la *cim_pif_la_buff;
1400*7e6ad469SVishal Kulkarni 	u32 size;
1401*7e6ad469SVishal Kulkarni 	int rc = 0;
1402*7e6ad469SVishal Kulkarni 
1403*7e6ad469SVishal Kulkarni 	size = sizeof(struct cim_pif_la) +
1404*7e6ad469SVishal Kulkarni 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1405*7e6ad469SVishal Kulkarni 
1406*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1407*7e6ad469SVishal Kulkarni 	if (rc)
1408*7e6ad469SVishal Kulkarni 		goto err;
1409*7e6ad469SVishal Kulkarni 
1410*7e6ad469SVishal Kulkarni 	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
1411*7e6ad469SVishal Kulkarni 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1412*7e6ad469SVishal Kulkarni 
1413*7e6ad469SVishal Kulkarni 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1414*7e6ad469SVishal Kulkarni 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1415*7e6ad469SVishal Kulkarni 			   NULL, NULL);
1416*7e6ad469SVishal Kulkarni 
1417*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1418*7e6ad469SVishal Kulkarni 	if (rc)
1419*7e6ad469SVishal Kulkarni 		goto err1;
1420*7e6ad469SVishal Kulkarni 
1421*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1422*7e6ad469SVishal Kulkarni err1:
1423*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1424*7e6ad469SVishal Kulkarni err:
1425*7e6ad469SVishal Kulkarni 	return rc;
1426*7e6ad469SVishal Kulkarni }
1427*7e6ad469SVishal Kulkarni 
1428*7e6ad469SVishal Kulkarni static int
1429*7e6ad469SVishal Kulkarni collect_tp_la(struct cudbg_init *pdbg_init,
1430*7e6ad469SVishal Kulkarni 	      struct cudbg_buffer *dbg_buff,
1431*7e6ad469SVishal Kulkarni 	      struct cudbg_error *cudbg_err)
1432*7e6ad469SVishal Kulkarni {
1433*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1434*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1435*7e6ad469SVishal Kulkarni 	struct struct_tp_la *tp_la_buff;
1436*7e6ad469SVishal Kulkarni 	u32 size;
1437*7e6ad469SVishal Kulkarni 	int rc = 0;
1438*7e6ad469SVishal Kulkarni 
1439*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_tp_la) + TPLA_SIZE *  sizeof(u64);
1440*7e6ad469SVishal Kulkarni 
1441*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1442*7e6ad469SVishal Kulkarni 	if (rc)
1443*7e6ad469SVishal Kulkarni 		goto err;
1444*7e6ad469SVishal Kulkarni 
1445*7e6ad469SVishal Kulkarni 	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1446*7e6ad469SVishal Kulkarni 
1447*7e6ad469SVishal Kulkarni 	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1448*7e6ad469SVishal Kulkarni 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1449*7e6ad469SVishal Kulkarni 
1450*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1451*7e6ad469SVishal Kulkarni 	if (rc)
1452*7e6ad469SVishal Kulkarni 		goto err1;
1453*7e6ad469SVishal Kulkarni 
1454*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1455*7e6ad469SVishal Kulkarni err1:
1456*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1457*7e6ad469SVishal Kulkarni err:
1458*7e6ad469SVishal Kulkarni 	return rc;
1459*7e6ad469SVishal Kulkarni }
1460*7e6ad469SVishal Kulkarni 
1461*7e6ad469SVishal Kulkarni static int
1462*7e6ad469SVishal Kulkarni collect_fcoe_stats(struct cudbg_init *pdbg_init,
1463*7e6ad469SVishal Kulkarni 		   struct cudbg_buffer *dbg_buff,
1464*7e6ad469SVishal Kulkarni 		   struct cudbg_error *cudbg_err)
1465*7e6ad469SVishal Kulkarni {
1466*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1467*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1468*7e6ad469SVishal Kulkarni 	struct struct_tp_fcoe_stats  *tp_fcoe_stats_buff;
1469*7e6ad469SVishal Kulkarni 	u32 size;
1470*7e6ad469SVishal Kulkarni 	int rc = 0;
1471*7e6ad469SVishal Kulkarni 
1472*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_tp_fcoe_stats);
1473*7e6ad469SVishal Kulkarni 
1474*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1475*7e6ad469SVishal Kulkarni 	if (rc)
1476*7e6ad469SVishal Kulkarni 		goto err;
1477*7e6ad469SVishal Kulkarni 
1478*7e6ad469SVishal Kulkarni 	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1479*7e6ad469SVishal Kulkarni 
1480*7e6ad469SVishal Kulkarni 	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1481*7e6ad469SVishal Kulkarni 	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1482*7e6ad469SVishal Kulkarni 
1483*7e6ad469SVishal Kulkarni 	if (padap->params.arch.nchan == NCHAN) {
1484*7e6ad469SVishal Kulkarni 		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1485*7e6ad469SVishal Kulkarni 		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1486*7e6ad469SVishal Kulkarni 	}
1487*7e6ad469SVishal Kulkarni 
1488*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1489*7e6ad469SVishal Kulkarni 	if (rc)
1490*7e6ad469SVishal Kulkarni 		goto err1;
1491*7e6ad469SVishal Kulkarni 
1492*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1493*7e6ad469SVishal Kulkarni err1:
1494*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1495*7e6ad469SVishal Kulkarni err:
1496*7e6ad469SVishal Kulkarni 	return rc;
1497*7e6ad469SVishal Kulkarni }
1498*7e6ad469SVishal Kulkarni 
1499*7e6ad469SVishal Kulkarni static int
1500*7e6ad469SVishal Kulkarni collect_tp_err_stats(struct cudbg_init *pdbg_init,
1501*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
1502*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
1503*7e6ad469SVishal Kulkarni {
1504*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1505*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1506*7e6ad469SVishal Kulkarni 	struct struct_tp_err_stats *tp_err_stats_buff;
1507*7e6ad469SVishal Kulkarni 	u32 size;
1508*7e6ad469SVishal Kulkarni 	int rc = 0;
1509*7e6ad469SVishal Kulkarni 
1510*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_tp_err_stats);
1511*7e6ad469SVishal Kulkarni 
1512*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1513*7e6ad469SVishal Kulkarni 	if (rc)
1514*7e6ad469SVishal Kulkarni 		goto err;
1515*7e6ad469SVishal Kulkarni 
1516*7e6ad469SVishal Kulkarni 	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1517*7e6ad469SVishal Kulkarni 
1518*7e6ad469SVishal Kulkarni 	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1519*7e6ad469SVishal Kulkarni 	tp_err_stats_buff->nchan = padap->params.arch.nchan;
1520*7e6ad469SVishal Kulkarni 
1521*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1522*7e6ad469SVishal Kulkarni 	if (rc)
1523*7e6ad469SVishal Kulkarni 		goto err1;
1524*7e6ad469SVishal Kulkarni 
1525*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1526*7e6ad469SVishal Kulkarni err1:
1527*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1528*7e6ad469SVishal Kulkarni err:
1529*7e6ad469SVishal Kulkarni 	return rc;
1530*7e6ad469SVishal Kulkarni }
1531*7e6ad469SVishal Kulkarni 
1532*7e6ad469SVishal Kulkarni static int
1533*7e6ad469SVishal Kulkarni collect_tcp_stats(struct cudbg_init *pdbg_init,
1534*7e6ad469SVishal Kulkarni 		  struct cudbg_buffer *dbg_buff,
1535*7e6ad469SVishal Kulkarni 		  struct cudbg_error *cudbg_err)
1536*7e6ad469SVishal Kulkarni {
1537*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1538*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1539*7e6ad469SVishal Kulkarni 	struct struct_tcp_stats *tcp_stats_buff;
1540*7e6ad469SVishal Kulkarni 	u32 size;
1541*7e6ad469SVishal Kulkarni 	int rc = 0;
1542*7e6ad469SVishal Kulkarni 
1543*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_tcp_stats);
1544*7e6ad469SVishal Kulkarni 
1545*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1546*7e6ad469SVishal Kulkarni 	if (rc)
1547*7e6ad469SVishal Kulkarni 		goto err;
1548*7e6ad469SVishal Kulkarni 
1549*7e6ad469SVishal Kulkarni 	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1550*7e6ad469SVishal Kulkarni 
1551*7e6ad469SVishal Kulkarni 	/* spin_lock(&padap->stats_lock);	TODO*/
1552*7e6ad469SVishal Kulkarni 	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1553*7e6ad469SVishal Kulkarni 	/* spin_unlock(&padap->stats_lock);	TODO*/
1554*7e6ad469SVishal Kulkarni 
1555*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1556*7e6ad469SVishal Kulkarni 	if (rc)
1557*7e6ad469SVishal Kulkarni 		goto err1;
1558*7e6ad469SVishal Kulkarni 
1559*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1560*7e6ad469SVishal Kulkarni err1:
1561*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1562*7e6ad469SVishal Kulkarni err:
1563*7e6ad469SVishal Kulkarni 	return rc;
1564*7e6ad469SVishal Kulkarni }
1565*7e6ad469SVishal Kulkarni 
1566*7e6ad469SVishal Kulkarni static int
1567*7e6ad469SVishal Kulkarni collect_hw_sched(struct cudbg_init *pdbg_init,
1568*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
1569*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
1570*7e6ad469SVishal Kulkarni {
1571*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1572*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1573*7e6ad469SVishal Kulkarni 	struct struct_hw_sched *hw_sched_buff;
1574*7e6ad469SVishal Kulkarni 	u32 size;
1575*7e6ad469SVishal Kulkarni 	int i, rc = 0;
1576*7e6ad469SVishal Kulkarni 
1577*7e6ad469SVishal Kulkarni 	if (!padap->params.vpd.cclk) {
1578*7e6ad469SVishal Kulkarni 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1579*7e6ad469SVishal Kulkarni 		goto err;
1580*7e6ad469SVishal Kulkarni 	}
1581*7e6ad469SVishal Kulkarni 
1582*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_hw_sched);
1583*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1584*7e6ad469SVishal Kulkarni 	if (rc)
1585*7e6ad469SVishal Kulkarni 		goto err;
1586*7e6ad469SVishal Kulkarni 
1587*7e6ad469SVishal Kulkarni 	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1588*7e6ad469SVishal Kulkarni 
1589*7e6ad469SVishal Kulkarni 	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1590*7e6ad469SVishal Kulkarni 	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1591*7e6ad469SVishal Kulkarni 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1592*7e6ad469SVishal Kulkarni 
1593*7e6ad469SVishal Kulkarni 	for (i = 0; i < NTX_SCHED; ++i) {
1594*7e6ad469SVishal Kulkarni 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1595*7e6ad469SVishal Kulkarni 		    &hw_sched_buff->ipg[i], 1);
1596*7e6ad469SVishal Kulkarni 	}
1597*7e6ad469SVishal Kulkarni 
1598*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1599*7e6ad469SVishal Kulkarni 	if (rc)
1600*7e6ad469SVishal Kulkarni 		goto err1;
1601*7e6ad469SVishal Kulkarni 
1602*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1603*7e6ad469SVishal Kulkarni err1:
1604*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1605*7e6ad469SVishal Kulkarni err:
1606*7e6ad469SVishal Kulkarni 	return rc;
1607*7e6ad469SVishal Kulkarni }
1608*7e6ad469SVishal Kulkarni 
1609*7e6ad469SVishal Kulkarni static int
1610*7e6ad469SVishal Kulkarni collect_pm_stats(struct cudbg_init *pdbg_init,
1611*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
1612*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
1613*7e6ad469SVishal Kulkarni {
1614*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1615*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1616*7e6ad469SVishal Kulkarni 	struct struct_pm_stats *pm_stats_buff;
1617*7e6ad469SVishal Kulkarni 	u32 size;
1618*7e6ad469SVishal Kulkarni 	int rc = 0;
1619*7e6ad469SVishal Kulkarni 
1620*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_pm_stats);
1621*7e6ad469SVishal Kulkarni 
1622*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1623*7e6ad469SVishal Kulkarni 	if (rc)
1624*7e6ad469SVishal Kulkarni 		goto err;
1625*7e6ad469SVishal Kulkarni 
1626*7e6ad469SVishal Kulkarni 	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1627*7e6ad469SVishal Kulkarni 
1628*7e6ad469SVishal Kulkarni 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1629*7e6ad469SVishal Kulkarni 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1630*7e6ad469SVishal Kulkarni 
1631*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1632*7e6ad469SVishal Kulkarni 	if (rc)
1633*7e6ad469SVishal Kulkarni 		goto err1;
1634*7e6ad469SVishal Kulkarni 
1635*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1636*7e6ad469SVishal Kulkarni err1:
1637*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1638*7e6ad469SVishal Kulkarni err:
1639*7e6ad469SVishal Kulkarni 	return rc;
1640*7e6ad469SVishal Kulkarni }
1641*7e6ad469SVishal Kulkarni 
1642*7e6ad469SVishal Kulkarni static int
1643*7e6ad469SVishal Kulkarni collect_path_mtu(struct cudbg_init *pdbg_init,
1644*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
1645*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
1646*7e6ad469SVishal Kulkarni {
1647*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1648*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1649*7e6ad469SVishal Kulkarni 	u32 size;
1650*7e6ad469SVishal Kulkarni 	int rc = 0;
1651*7e6ad469SVishal Kulkarni 
1652*7e6ad469SVishal Kulkarni 	size = NMTUS  * sizeof(u16);
1653*7e6ad469SVishal Kulkarni 
1654*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1655*7e6ad469SVishal Kulkarni 	if (rc)
1656*7e6ad469SVishal Kulkarni 		goto err;
1657*7e6ad469SVishal Kulkarni 
1658*7e6ad469SVishal Kulkarni 	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1659*7e6ad469SVishal Kulkarni 
1660*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1661*7e6ad469SVishal Kulkarni 	if (rc)
1662*7e6ad469SVishal Kulkarni 		goto err1;
1663*7e6ad469SVishal Kulkarni 
1664*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1665*7e6ad469SVishal Kulkarni err1:
1666*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1667*7e6ad469SVishal Kulkarni err:
1668*7e6ad469SVishal Kulkarni 	return rc;
1669*7e6ad469SVishal Kulkarni }
1670*7e6ad469SVishal Kulkarni 
1671*7e6ad469SVishal Kulkarni static int
1672*7e6ad469SVishal Kulkarni collect_rss_key(struct cudbg_init *pdbg_init,
1673*7e6ad469SVishal Kulkarni 		struct cudbg_buffer *dbg_buff,
1674*7e6ad469SVishal Kulkarni 		struct cudbg_error *cudbg_err)
1675*7e6ad469SVishal Kulkarni {
1676*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1677*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1678*7e6ad469SVishal Kulkarni 	u32 size;
1679*7e6ad469SVishal Kulkarni 
1680*7e6ad469SVishal Kulkarni 	int rc = 0;
1681*7e6ad469SVishal Kulkarni 
1682*7e6ad469SVishal Kulkarni 	size = 10  * sizeof(u32);
1683*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1684*7e6ad469SVishal Kulkarni 	if (rc)
1685*7e6ad469SVishal Kulkarni 		goto err;
1686*7e6ad469SVishal Kulkarni 
1687*7e6ad469SVishal Kulkarni 	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1688*7e6ad469SVishal Kulkarni 
1689*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1690*7e6ad469SVishal Kulkarni 	if (rc)
1691*7e6ad469SVishal Kulkarni 		goto err1;
1692*7e6ad469SVishal Kulkarni 
1693*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1694*7e6ad469SVishal Kulkarni err1:
1695*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1696*7e6ad469SVishal Kulkarni err:
1697*7e6ad469SVishal Kulkarni 	return rc;
1698*7e6ad469SVishal Kulkarni }
1699*7e6ad469SVishal Kulkarni 
1700*7e6ad469SVishal Kulkarni static int
1701*7e6ad469SVishal Kulkarni collect_rss_config(struct cudbg_init *pdbg_init,
1702*7e6ad469SVishal Kulkarni 		   struct cudbg_buffer *dbg_buff,
1703*7e6ad469SVishal Kulkarni 		   struct cudbg_error *cudbg_err)
1704*7e6ad469SVishal Kulkarni {
1705*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1706*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1707*7e6ad469SVishal Kulkarni 	struct rss_config *rss_conf;
1708*7e6ad469SVishal Kulkarni 	int rc;
1709*7e6ad469SVishal Kulkarni 	u32 size;
1710*7e6ad469SVishal Kulkarni 
1711*7e6ad469SVishal Kulkarni 	size = sizeof(struct rss_config);
1712*7e6ad469SVishal Kulkarni 
1713*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1714*7e6ad469SVishal Kulkarni 	if (rc)
1715*7e6ad469SVishal Kulkarni 		goto err;
1716*7e6ad469SVishal Kulkarni 
1717*7e6ad469SVishal Kulkarni 	rss_conf =  (struct rss_config *)scratch_buff.data;
1718*7e6ad469SVishal Kulkarni 
1719*7e6ad469SVishal Kulkarni 	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1720*7e6ad469SVishal Kulkarni 	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1721*7e6ad469SVishal Kulkarni 	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1722*7e6ad469SVishal Kulkarni 	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1723*7e6ad469SVishal Kulkarni 	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1724*7e6ad469SVishal Kulkarni 	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1725*7e6ad469SVishal Kulkarni 	rss_conf->chip = padap->params.chip;
1726*7e6ad469SVishal Kulkarni 
1727*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1728*7e6ad469SVishal Kulkarni 	if (rc)
1729*7e6ad469SVishal Kulkarni 		goto err1;
1730*7e6ad469SVishal Kulkarni 
1731*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1732*7e6ad469SVishal Kulkarni 
1733*7e6ad469SVishal Kulkarni err1:
1734*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1735*7e6ad469SVishal Kulkarni err:
1736*7e6ad469SVishal Kulkarni 	return rc;
1737*7e6ad469SVishal Kulkarni }
1738*7e6ad469SVishal Kulkarni 
1739*7e6ad469SVishal Kulkarni static int
1740*7e6ad469SVishal Kulkarni collect_rss_vf_config(struct cudbg_init *pdbg_init,
1741*7e6ad469SVishal Kulkarni 		      struct cudbg_buffer *dbg_buff,
1742*7e6ad469SVishal Kulkarni 		      struct cudbg_error *cudbg_err)
1743*7e6ad469SVishal Kulkarni {
1744*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1745*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1746*7e6ad469SVishal Kulkarni 	struct rss_vf_conf *vfconf;
1747*7e6ad469SVishal Kulkarni 	int vf, rc, vf_count = 0;
1748*7e6ad469SVishal Kulkarni 	u32 size;
1749*7e6ad469SVishal Kulkarni 
1750*7e6ad469SVishal Kulkarni 	vf_count = padap->params.arch.vfcount;
1751*7e6ad469SVishal Kulkarni 	size = vf_count * sizeof(*vfconf);
1752*7e6ad469SVishal Kulkarni 
1753*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1754*7e6ad469SVishal Kulkarni 	if (rc)
1755*7e6ad469SVishal Kulkarni 		goto err;
1756*7e6ad469SVishal Kulkarni 
1757*7e6ad469SVishal Kulkarni 	vfconf =  (struct rss_vf_conf *)scratch_buff.data;
1758*7e6ad469SVishal Kulkarni 
1759*7e6ad469SVishal Kulkarni 	for (vf = 0; vf < vf_count; vf++) {
1760*7e6ad469SVishal Kulkarni 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1761*7e6ad469SVishal Kulkarni 				      &vfconf[vf].rss_vf_vfh, 1);
1762*7e6ad469SVishal Kulkarni 	}
1763*7e6ad469SVishal Kulkarni 
1764*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1765*7e6ad469SVishal Kulkarni 	if (rc)
1766*7e6ad469SVishal Kulkarni 		goto err1;
1767*7e6ad469SVishal Kulkarni 
1768*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1769*7e6ad469SVishal Kulkarni 
1770*7e6ad469SVishal Kulkarni err1:
1771*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1772*7e6ad469SVishal Kulkarni err:
1773*7e6ad469SVishal Kulkarni 	return rc;
1774*7e6ad469SVishal Kulkarni }
1775*7e6ad469SVishal Kulkarni 
1776*7e6ad469SVishal Kulkarni static int
1777*7e6ad469SVishal Kulkarni collect_rss_pf_config(struct cudbg_init *pdbg_init,
1778*7e6ad469SVishal Kulkarni 		      struct cudbg_buffer *dbg_buff,
1779*7e6ad469SVishal Kulkarni 		      struct cudbg_error *cudbg_err)
1780*7e6ad469SVishal Kulkarni {
1781*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1782*7e6ad469SVishal Kulkarni 	struct rss_pf_conf *pfconf;
1783*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1784*7e6ad469SVishal Kulkarni 	u32 rss_pf_map, rss_pf_mask, size;
1785*7e6ad469SVishal Kulkarni 	int pf, rc;
1786*7e6ad469SVishal Kulkarni 
1787*7e6ad469SVishal Kulkarni 	size = 8  * sizeof(*pfconf);
1788*7e6ad469SVishal Kulkarni 
1789*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1790*7e6ad469SVishal Kulkarni 	if (rc)
1791*7e6ad469SVishal Kulkarni 		goto err;
1792*7e6ad469SVishal Kulkarni 
1793*7e6ad469SVishal Kulkarni 	pfconf =  (struct rss_pf_conf *)scratch_buff.data;
1794*7e6ad469SVishal Kulkarni 
1795*7e6ad469SVishal Kulkarni 	rss_pf_map = t4_read_rss_pf_map(padap, 1);
1796*7e6ad469SVishal Kulkarni 	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1797*7e6ad469SVishal Kulkarni 
1798*7e6ad469SVishal Kulkarni 	for (pf = 0; pf < 8; pf++) {
1799*7e6ad469SVishal Kulkarni 		pfconf[pf].rss_pf_map = rss_pf_map;
1800*7e6ad469SVishal Kulkarni 		pfconf[pf].rss_pf_mask = rss_pf_mask;
1801*7e6ad469SVishal Kulkarni 		/* no return val */
1802*7e6ad469SVishal Kulkarni 		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1803*7e6ad469SVishal Kulkarni 	}
1804*7e6ad469SVishal Kulkarni 
1805*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1806*7e6ad469SVishal Kulkarni 	if (rc)
1807*7e6ad469SVishal Kulkarni 		goto err1;
1808*7e6ad469SVishal Kulkarni 
1809*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
1810*7e6ad469SVishal Kulkarni err1:
1811*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
1812*7e6ad469SVishal Kulkarni err:
1813*7e6ad469SVishal Kulkarni 	return rc;
1814*7e6ad469SVishal Kulkarni }
1815*7e6ad469SVishal Kulkarni 
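/*
 * Test a type-specific bit in a raw context image returned by
 * read_sge_ctxt(); the bit positions below are presumably where the
 * hardware places the context's valid bit for each context type.
 */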
1816*7e6ad469SVishal Kulkarni static int
1817*7e6ad469SVishal Kulkarni check_valid(u32 *buf, int type)
1818*7e6ad469SVishal Kulkarni {
1819*7e6ad469SVishal Kulkarni 	int index;
1820*7e6ad469SVishal Kulkarni 	int bit;
1821*7e6ad469SVishal Kulkarni 	int bit_pos = 0;
1822*7e6ad469SVishal Kulkarni 
1823*7e6ad469SVishal Kulkarni 	switch (type) {
1824*7e6ad469SVishal Kulkarni 	case CTXT_EGRESS:
1825*7e6ad469SVishal Kulkarni 		bit_pos = 176;
1826*7e6ad469SVishal Kulkarni 		break;
1827*7e6ad469SVishal Kulkarni 	case CTXT_INGRESS:
1828*7e6ad469SVishal Kulkarni 		bit_pos = 141;
1829*7e6ad469SVishal Kulkarni 		break;
1830*7e6ad469SVishal Kulkarni 	case CTXT_FLM:
1831*7e6ad469SVishal Kulkarni 		bit_pos = 89;
1832*7e6ad469SVishal Kulkarni 		break;
1833*7e6ad469SVishal Kulkarni 	}
1834*7e6ad469SVishal Kulkarni 	index = bit_pos / 32;
1835*7e6ad469SVishal Kulkarni 	bit =  bit_pos % 32;
1836*7e6ad469SVishal Kulkarni 
1837*7e6ad469SVishal Kulkarni 	return buf[index] & (1U << bit);
1838*7e6ad469SVishal Kulkarni }
1839*7e6ad469SVishal Kulkarni 
1840*7e6ad469SVishal Kulkarni /**
1841*7e6ad469SVishal Kulkarni  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1842*7e6ad469SVishal Kulkarni  *
1843*7e6ad469SVishal Kulkarni  * For EGRESS and INGRESS, do the following calculation.
1844*7e6ad469SVishal Kulkarni  * max_qid = (DBQ/IMSG context region size in bytes) /
1845*7e6ad469SVishal Kulkarni  *	     (size of context in bytes).
1846*7e6ad469SVishal Kulkarni  *
1847*7e6ad469SVishal Kulkarni  * For FLM, do the following calculation.
1848*7e6ad469SVishal Kulkarni  * max_qid = (FLM cache region size in bytes) /
1849*7e6ad469SVishal Kulkarni  *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1850*7e6ad469SVishal Kulkarni  *
1851*7e6ad469SVishal Kulkarni  * FLM and CNM map 1-to-1 when header splitting is disabled; i.e., max CNM
1852*7e6ad469SVishal Kulkarni  * qid equals max FLM qid. However, when header splitting is enabled, max
1853*7e6ad469SVishal Kulkarni  * CNM qid is half of max FLM qid.
1854*7e6ad469SVishal Kulkarni  */
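/*
 * Worked example (hypothetical numbers): if the EGRESS (DBQ) context
 * region spans 64 MB, then max_ctx_qid[CTXT_EGRESS] is
 * (64 MB / CUDBG_CTXT_SIZE_BYTES), later clamped to M_CTXTQID by the
 * sanity checks at the end of this function.
 */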
1855*7e6ad469SVishal Kulkarni static int
1856*7e6ad469SVishal Kulkarni get_max_ctxt_qid(struct adapter *padap,
1857*7e6ad469SVishal Kulkarni 		 struct struct_meminfo *meminfo,
1858*7e6ad469SVishal Kulkarni 		 u32 *max_ctx_qid, u8 nelem)
1859*7e6ad469SVishal Kulkarni {
1860*7e6ad469SVishal Kulkarni 	u32 i, idx, found = 0;
1861*7e6ad469SVishal Kulkarni 
1862*7e6ad469SVishal Kulkarni 	if (nelem != (CTXT_CNM + 1))
1863*7e6ad469SVishal Kulkarni 		return -EINVAL;
1864*7e6ad469SVishal Kulkarni 
1865*7e6ad469SVishal Kulkarni 	for (i = 0; i < meminfo->mem_c; i++) {
1866*7e6ad469SVishal Kulkarni 		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1867*7e6ad469SVishal Kulkarni 			continue;                        /* skip holes */
1868*7e6ad469SVishal Kulkarni 
1869*7e6ad469SVishal Kulkarni 		idx = meminfo->mem[i].idx;
1870*7e6ad469SVishal Kulkarni 		/* Get DBQ, IMSG, and FLM context region size */
1871*7e6ad469SVishal Kulkarni 		if (idx <= CTXT_FLM) {
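			/*
			 * A limit of zero means the region extends to the
			 * next region's base (or to ~0 for the last entry),
			 * so derive it here.
			 */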
1872*7e6ad469SVishal Kulkarni 			if (!(meminfo->mem[i].limit))
1873*7e6ad469SVishal Kulkarni 				meminfo->mem[i].limit =
1874*7e6ad469SVishal Kulkarni 					i < meminfo->mem_c - 1 ?
1875*7e6ad469SVishal Kulkarni 					meminfo->mem[i + 1].base - 1 : ~0;
1876*7e6ad469SVishal Kulkarni 
1877*7e6ad469SVishal Kulkarni 			if (idx < CTXT_FLM) {
1878*7e6ad469SVishal Kulkarni 				/* Get EGRESS and INGRESS max qid. */
1879*7e6ad469SVishal Kulkarni 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1880*7e6ad469SVishal Kulkarni 						    meminfo->mem[i].base + 1) /
1881*7e6ad469SVishal Kulkarni 						   CUDBG_CTXT_SIZE_BYTES;
1882*7e6ad469SVishal Kulkarni 				found++;
1883*7e6ad469SVishal Kulkarni 			} else {
1884*7e6ad469SVishal Kulkarni 				/* Get FLM and CNM max qid. */
1885*7e6ad469SVishal Kulkarni 				u32 value, edram_ptr_count;
1886*7e6ad469SVishal Kulkarni 				u8 bytes_per_ptr = 8;
1887*7e6ad469SVishal Kulkarni 				u8 nohdr;
1888*7e6ad469SVishal Kulkarni 
1889*7e6ad469SVishal Kulkarni 				value = t4_read_reg(padap, A_SGE_FLM_CFG);
1890*7e6ad469SVishal Kulkarni 
1891*7e6ad469SVishal Kulkarni 				/* Check if header splitting is enabled. */
1892*7e6ad469SVishal Kulkarni 				nohdr = (value >> S_NOHDR) & 1U;
1893*7e6ad469SVishal Kulkarni 
1894*7e6ad469SVishal Kulkarni 				/* Get the number of pointers in EDRAM per
1895*7e6ad469SVishal Kulkarni 				 * qid in units of 32.
1896*7e6ad469SVishal Kulkarni 				 */
1897*7e6ad469SVishal Kulkarni 				edram_ptr_count = 32 *
1898*7e6ad469SVishal Kulkarni 						  (1U << G_EDRAMPTRCNT(value));
1899*7e6ad469SVishal Kulkarni 
1900*7e6ad469SVishal Kulkarni 				/* EDRAMPTRCNT value of 3 is reserved.
1901*7e6ad469SVishal Kulkarni 				 * So don't exceed 128.
1902*7e6ad469SVishal Kulkarni 				 */
1903*7e6ad469SVishal Kulkarni 				if (edram_ptr_count > 128)
1904*7e6ad469SVishal Kulkarni 					edram_ptr_count = 128;
1905*7e6ad469SVishal Kulkarni 
1906*7e6ad469SVishal Kulkarni 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1907*7e6ad469SVishal Kulkarni 						    meminfo->mem[i].base + 1) /
1908*7e6ad469SVishal Kulkarni 						   (edram_ptr_count *
1909*7e6ad469SVishal Kulkarni 						    bytes_per_ptr);
1910*7e6ad469SVishal Kulkarni 				found++;
1911*7e6ad469SVishal Kulkarni 
1912*7e6ad469SVishal Kulkarni 				/* CNM has 1-to-1 mapping with FLM.
1913*7e6ad469SVishal Kulkarni 				 * However, if header splitting is enabled,
1914*7e6ad469SVishal Kulkarni 				 * then max CNM qid is half of max FLM qid.
1915*7e6ad469SVishal Kulkarni 				 */
1916*7e6ad469SVishal Kulkarni 				max_ctx_qid[CTXT_CNM] = nohdr ?
1917*7e6ad469SVishal Kulkarni 							max_ctx_qid[idx] :
1918*7e6ad469SVishal Kulkarni 							max_ctx_qid[idx] >> 1;
1919*7e6ad469SVishal Kulkarni 
1920*7e6ad469SVishal Kulkarni 				/* One more increment for CNM */
1921*7e6ad469SVishal Kulkarni 				found++;
1922*7e6ad469SVishal Kulkarni 			}
1923*7e6ad469SVishal Kulkarni 		}
1924*7e6ad469SVishal Kulkarni 		if (found == nelem)
1925*7e6ad469SVishal Kulkarni 			break;
1926*7e6ad469SVishal Kulkarni 	}
1927*7e6ad469SVishal Kulkarni 
1928*7e6ad469SVishal Kulkarni 	/* Sanity check. Ensure the values are within known max. */
1929*7e6ad469SVishal Kulkarni 	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1930*7e6ad469SVishal Kulkarni 					 M_CTXTQID);
1931*7e6ad469SVishal Kulkarni 	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1932*7e6ad469SVishal Kulkarni 					  CUDBG_MAX_INGRESS_QIDS);
1933*7e6ad469SVishal Kulkarni 	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1934*7e6ad469SVishal Kulkarni 				      CUDBG_MAX_FL_QIDS);
1935*7e6ad469SVishal Kulkarni 	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1936*7e6ad469SVishal Kulkarni 				      CUDBG_MAX_CNM_QIDS);
1937*7e6ad469SVishal Kulkarni 	return 0;
1938*7e6ad469SVishal Kulkarni }
1939*7e6ad469SVishal Kulkarni 
1940*7e6ad469SVishal Kulkarni static int
1941*7e6ad469SVishal Kulkarni collect_dump_context(struct cudbg_init *pdbg_init,
1942*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
1943*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
1944*7e6ad469SVishal Kulkarni {
1945*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
1946*7e6ad469SVishal Kulkarni 	struct cudbg_buffer temp_buff;
1947*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
1948*7e6ad469SVishal Kulkarni 	u32 size = 0, next_offset = 0, total_size = 0;
1949*7e6ad469SVishal Kulkarni 	struct cudbg_ch_cntxt *buff = NULL;
1950*7e6ad469SVishal Kulkarni 	struct struct_meminfo meminfo;
1951*7e6ad469SVishal Kulkarni 	int bytes = 0;
1952*7e6ad469SVishal Kulkarni 	int rc = 0;
1953*7e6ad469SVishal Kulkarni 	u32 i, j;
1954*7e6ad469SVishal Kulkarni 	u32 max_ctx_qid[CTXT_CNM + 1];
1955*7e6ad469SVishal Kulkarni 	bool limit_qid = false;
1956*7e6ad469SVishal Kulkarni 	u32 qid_count = 0;
1957*7e6ad469SVishal Kulkarni 
1958*7e6ad469SVishal Kulkarni 	rc = fill_meminfo(padap, &meminfo);
1959*7e6ad469SVishal Kulkarni 	if (rc)
1960*7e6ad469SVishal Kulkarni 		goto err;
1961*7e6ad469SVishal Kulkarni 
1962*7e6ad469SVishal Kulkarni 	/* Get max valid qid for each type of queue */
1963*7e6ad469SVishal Kulkarni 	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1964*7e6ad469SVishal Kulkarni 	if (rc)
1965*7e6ad469SVishal Kulkarni 		goto err;
1966*7e6ad469SVishal Kulkarni 
1967*7e6ad469SVishal Kulkarni 	/* There are four types of queues. Collect context up to the max
1968*7e6ad469SVishal Kulkarni 	 * qid of each type of queue.
1969*7e6ad469SVishal Kulkarni 	 */
1970*7e6ad469SVishal Kulkarni 	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1971*7e6ad469SVishal Kulkarni 		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1972*7e6ad469SVishal Kulkarni 
1973*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1974*7e6ad469SVishal Kulkarni 	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1975*7e6ad469SVishal Kulkarni 		/* Not enough scratch memory available.
1976*7e6ad469SVishal Kulkarni 		 * Collect context for at most CUDBG_LOWMEM_MAX_CTXT_QIDS
1977*7e6ad469SVishal Kulkarni 		 * qids of each queue type.
1978*7e6ad469SVishal Kulkarni 		 */
1979*7e6ad469SVishal Kulkarni 		size = 0;
1980*7e6ad469SVishal Kulkarni 		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1981*7e6ad469SVishal Kulkarni 			size += sizeof(struct cudbg_ch_cntxt) *
1982*7e6ad469SVishal Kulkarni 				CUDBG_LOWMEM_MAX_CTXT_QIDS;
1983*7e6ad469SVishal Kulkarni 
1984*7e6ad469SVishal Kulkarni 		limit_qid = true;
1985*7e6ad469SVishal Kulkarni 		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1986*7e6ad469SVishal Kulkarni 		if (rc)
1987*7e6ad469SVishal Kulkarni 			goto err;
1988*7e6ad469SVishal Kulkarni 	}
1989*7e6ad469SVishal Kulkarni 
1990*7e6ad469SVishal Kulkarni 	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1991*7e6ad469SVishal Kulkarni 
1992*7e6ad469SVishal Kulkarni 	/* Collect context data */
1993*7e6ad469SVishal Kulkarni 	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1994*7e6ad469SVishal Kulkarni 		qid_count = 0;
1995*7e6ad469SVishal Kulkarni 		for (j = 0; j < max_ctx_qid[i]; j++) {
1996*7e6ad469SVishal Kulkarni 			read_sge_ctxt(pdbg_init, j, i, buff->data);
1997*7e6ad469SVishal Kulkarni 
1998*7e6ad469SVishal Kulkarni 			rc = check_valid(buff->data, i);
1999*7e6ad469SVishal Kulkarni 			if (rc) {
2000*7e6ad469SVishal Kulkarni 				buff->cntxt_type = i;
2001*7e6ad469SVishal Kulkarni 				buff->cntxt_id = j;
2002*7e6ad469SVishal Kulkarni 				buff++;
2003*7e6ad469SVishal Kulkarni 				total_size += sizeof(struct cudbg_ch_cntxt);
2004*7e6ad469SVishal Kulkarni 
2005*7e6ad469SVishal Kulkarni 				if (i == CTXT_FLM) {
2006*7e6ad469SVishal Kulkarni 					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
2007*7e6ad469SVishal Kulkarni 						      buff->data);
2008*7e6ad469SVishal Kulkarni 					buff->cntxt_type = CTXT_CNM;
2009*7e6ad469SVishal Kulkarni 					buff->cntxt_id = j;
2010*7e6ad469SVishal Kulkarni 					buff++;
2011*7e6ad469SVishal Kulkarni 					total_size +=
2012*7e6ad469SVishal Kulkarni 						sizeof(struct cudbg_ch_cntxt);
2013*7e6ad469SVishal Kulkarni 				}
2014*7e6ad469SVishal Kulkarni 				qid_count++;
2015*7e6ad469SVishal Kulkarni 			}
2016*7e6ad469SVishal Kulkarni 
2017*7e6ad469SVishal Kulkarni 			/* If there's not enough space to collect more qids,
2018*7e6ad469SVishal Kulkarni 			 * then bail and move on to the next queue type.
2019*7e6ad469SVishal Kulkarni 			 */
2020*7e6ad469SVishal Kulkarni 			if (limit_qid &&
2021*7e6ad469SVishal Kulkarni 			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
2022*7e6ad469SVishal Kulkarni 				break;
2023*7e6ad469SVishal Kulkarni 		}
2024*7e6ad469SVishal Kulkarni 	}
2025*7e6ad469SVishal Kulkarni 
2026*7e6ad469SVishal Kulkarni 	scratch_buff.size = total_size;
2027*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2028*7e6ad469SVishal Kulkarni 	if (rc)
2029*7e6ad469SVishal Kulkarni 		goto err1;
2030*7e6ad469SVishal Kulkarni 
2031*7e6ad469SVishal Kulkarni 	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
2032*7e6ad469SVishal Kulkarni 	while (total_size > 0) {
2033*7e6ad469SVishal Kulkarni 		bytes = min_t(unsigned long, (unsigned long)total_size,
2034*7e6ad469SVishal Kulkarni 			      (unsigned long)CUDBG_CHUNK_SIZE);
2035*7e6ad469SVishal Kulkarni 		temp_buff.size = bytes;
2036*7e6ad469SVishal Kulkarni 		temp_buff.data = (void *)((char *)scratch_buff.data +
2037*7e6ad469SVishal Kulkarni 					  next_offset);
2038*7e6ad469SVishal Kulkarni 
2039*7e6ad469SVishal Kulkarni 		rc = compress_buff(&temp_buff, dbg_buff);
2040*7e6ad469SVishal Kulkarni 		if (rc)
2041*7e6ad469SVishal Kulkarni 			goto err1;
2042*7e6ad469SVishal Kulkarni 
2043*7e6ad469SVishal Kulkarni 		total_size -= bytes;
2044*7e6ad469SVishal Kulkarni 		next_offset += bytes;
2045*7e6ad469SVishal Kulkarni 	}
2046*7e6ad469SVishal Kulkarni 
2047*7e6ad469SVishal Kulkarni err1:
2048*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
2049*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2050*7e6ad469SVishal Kulkarni err:
2051*7e6ad469SVishal Kulkarni 	return rc;
2052*7e6ad469SVishal Kulkarni }
2053*7e6ad469SVishal Kulkarni 
2054*7e6ad469SVishal Kulkarni static int
2055*7e6ad469SVishal Kulkarni collect_fw_devlog(struct cudbg_init *pdbg_init,
2056*7e6ad469SVishal Kulkarni 		  struct cudbg_buffer *dbg_buff,
2057*7e6ad469SVishal Kulkarni 		  struct cudbg_error *cudbg_err)
2058*7e6ad469SVishal Kulkarni {
2059*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2060*7e6ad469SVishal Kulkarni 	struct devlog_params *dparams = &padap->params.devlog;
2061*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2062*7e6ad469SVishal Kulkarni 	u32 offset;
2063*7e6ad469SVishal Kulkarni 	int rc = 0;
2064*7e6ad469SVishal Kulkarni 
2065*7e6ad469SVishal Kulkarni 	rc = t4_init_devlog_params(padap, 1);
2066*7e6ad469SVishal Kulkarni 
2067*7e6ad469SVishal Kulkarni 	if (rc < 0) {
2068*7e6ad469SVishal Kulkarni 		pdbg_init->print(padap->dip, CE_NOTE,
2069*7e6ad469SVishal Kulkarni 				 "%s(), t4_init_devlog_params failed, rc: "
2070*7e6ad469SVishal Kulkarni 				 "%d\n", __func__, rc);
2071*7e6ad469SVishal Kulkarni 		rc = CUDBG_SYSTEM_ERROR;
2072*7e6ad469SVishal Kulkarni 		goto err;
2073*7e6ad469SVishal Kulkarni 	}
2074*7e6ad469SVishal Kulkarni 
2075*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
2076*7e6ad469SVishal Kulkarni 
2077*7e6ad469SVishal Kulkarni 	if (rc)
2078*7e6ad469SVishal Kulkarni 		goto err;
2079*7e6ad469SVishal Kulkarni 
2080*7e6ad469SVishal Kulkarni 	/* Collect FW devlog */
2081*7e6ad469SVishal Kulkarni 	if (dparams->start != 0) {
2082*7e6ad469SVishal Kulkarni 		offset = scratch_buff.offset;
2083*7e6ad469SVishal Kulkarni 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
2084*7e6ad469SVishal Kulkarni 				  dparams->memtype, dparams->start,
2085*7e6ad469SVishal Kulkarni 				  dparams->size,
2086*7e6ad469SVishal Kulkarni 				  (__be32 *)((char *)scratch_buff.data +
2087*7e6ad469SVishal Kulkarni 					     offset), 1);
2088*7e6ad469SVishal Kulkarni 
2089*7e6ad469SVishal Kulkarni 		if (rc) {
2090*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2091*7e6ad469SVishal Kulkarni 					 "%s(), t4_memory_rw failed!, rc: "\
2092*7e6ad469SVishal Kulkarni 					 "%d\n", __func__, rc);
2093*7e6ad469SVishal Kulkarni 			cudbg_err->sys_err = rc;
2094*7e6ad469SVishal Kulkarni 			goto err1;
2095*7e6ad469SVishal Kulkarni 		}
2096*7e6ad469SVishal Kulkarni 	}
2097*7e6ad469SVishal Kulkarni 
2098*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2099*7e6ad469SVishal Kulkarni 
2100*7e6ad469SVishal Kulkarni 	if (rc)
2101*7e6ad469SVishal Kulkarni 		goto err1;
2102*7e6ad469SVishal Kulkarni 
2103*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2104*7e6ad469SVishal Kulkarni 
2105*7e6ad469SVishal Kulkarni err1:
2106*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2107*7e6ad469SVishal Kulkarni err:
2108*7e6ad469SVishal Kulkarni 	return rc;
2109*7e6ad469SVishal Kulkarni }
2110*7e6ad469SVishal Kulkarni /* CIM OBQ */
2111*7e6ad469SVishal Kulkarni 
2112*7e6ad469SVishal Kulkarni static int
2113*7e6ad469SVishal Kulkarni collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2114*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2115*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2116*7e6ad469SVishal Kulkarni {
2117*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 0;
2118*7e6ad469SVishal Kulkarni 
2119*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2120*7e6ad469SVishal Kulkarni 
2121*7e6ad469SVishal Kulkarni 	return rc;
2122*7e6ad469SVishal Kulkarni }
2123*7e6ad469SVishal Kulkarni 
2124*7e6ad469SVishal Kulkarni static int
2125*7e6ad469SVishal Kulkarni collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2126*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2127*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2128*7e6ad469SVishal Kulkarni {
2129*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 1;
2130*7e6ad469SVishal Kulkarni 
2131*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2132*7e6ad469SVishal Kulkarni 
2133*7e6ad469SVishal Kulkarni 	return rc;
2134*7e6ad469SVishal Kulkarni }
2135*7e6ad469SVishal Kulkarni 
2136*7e6ad469SVishal Kulkarni static int
2137*7e6ad469SVishal Kulkarni collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2138*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2139*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2140*7e6ad469SVishal Kulkarni {
2141*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 2;
2142*7e6ad469SVishal Kulkarni 
2143*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2144*7e6ad469SVishal Kulkarni 
2145*7e6ad469SVishal Kulkarni 	return rc;
2146*7e6ad469SVishal Kulkarni }
2147*7e6ad469SVishal Kulkarni 
2148*7e6ad469SVishal Kulkarni static int
2149*7e6ad469SVishal Kulkarni collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2150*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2151*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2152*7e6ad469SVishal Kulkarni {
2153*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 3;
2154*7e6ad469SVishal Kulkarni 
2155*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2156*7e6ad469SVishal Kulkarni 
2157*7e6ad469SVishal Kulkarni 	return rc;
2158*7e6ad469SVishal Kulkarni }
2159*7e6ad469SVishal Kulkarni 
2160*7e6ad469SVishal Kulkarni static int
2161*7e6ad469SVishal Kulkarni collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2162*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
2163*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
2164*7e6ad469SVishal Kulkarni {
2165*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 4;
2166*7e6ad469SVishal Kulkarni 
2167*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2168*7e6ad469SVishal Kulkarni 
2169*7e6ad469SVishal Kulkarni 	return rc;
2170*7e6ad469SVishal Kulkarni }
2171*7e6ad469SVishal Kulkarni 
2172*7e6ad469SVishal Kulkarni static int
2173*7e6ad469SVishal Kulkarni collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2174*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2175*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2176*7e6ad469SVishal Kulkarni {
2177*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 5;
2178*7e6ad469SVishal Kulkarni 
2179*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2180*7e6ad469SVishal Kulkarni 
2181*7e6ad469SVishal Kulkarni 	return rc;
2182*7e6ad469SVishal Kulkarni }
2183*7e6ad469SVishal Kulkarni 
2184*7e6ad469SVishal Kulkarni static int
2185*7e6ad469SVishal Kulkarni collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2186*7e6ad469SVishal Kulkarni 		      struct cudbg_buffer *dbg_buff,
2187*7e6ad469SVishal Kulkarni 		      struct cudbg_error *cudbg_err)
2188*7e6ad469SVishal Kulkarni {
2189*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 6;
2190*7e6ad469SVishal Kulkarni 
2191*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2192*7e6ad469SVishal Kulkarni 
2193*7e6ad469SVishal Kulkarni 	return rc;
2194*7e6ad469SVishal Kulkarni }
2195*7e6ad469SVishal Kulkarni 
2196*7e6ad469SVishal Kulkarni static int
2197*7e6ad469SVishal Kulkarni collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2198*7e6ad469SVishal Kulkarni 		      struct cudbg_buffer *dbg_buff,
2199*7e6ad469SVishal Kulkarni 		      struct cudbg_error *cudbg_err)
2200*7e6ad469SVishal Kulkarni {
2201*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 7;
2202*7e6ad469SVishal Kulkarni 
2203*7e6ad469SVishal Kulkarni 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2204*7e6ad469SVishal Kulkarni 
2205*7e6ad469SVishal Kulkarni 	return rc;
2206*7e6ad469SVishal Kulkarni }
2207*7e6ad469SVishal Kulkarni 
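/*
 * Common helper for the CIM OBQ collectors below.  Reads outbound queue
 * 'qid' (0-3: ULP0-3, 4: SGE, 5: NC-SI, 6-7: SGE rx queues) with
 * t4_read_cim_obq() and emits only the words that were actually read.
 */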
2208*7e6ad469SVishal Kulkarni static int
2209*7e6ad469SVishal Kulkarni read_cim_obq(struct cudbg_init *pdbg_init,
2210*7e6ad469SVishal Kulkarni 	     struct cudbg_buffer *dbg_buff,
2211*7e6ad469SVishal Kulkarni 	     struct cudbg_error *cudbg_err, int qid)
2212*7e6ad469SVishal Kulkarni {
2213*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2214*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2215*7e6ad469SVishal Kulkarni 	u32 qsize;
2216*7e6ad469SVishal Kulkarni 	int rc;
2217*7e6ad469SVishal Kulkarni 	int no_of_read_words;
2218*7e6ad469SVishal Kulkarni 
2219*7e6ad469SVishal Kulkarni 	/* collect CIM OBQ */
2220*7e6ad469SVishal Kulkarni 	qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
2221*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2222*7e6ad469SVishal Kulkarni 	if (rc)
2223*7e6ad469SVishal Kulkarni 		goto err;
2224*7e6ad469SVishal Kulkarni 
2225*7e6ad469SVishal Kulkarni 	/* t4_read_cim_obq will return no. of read words or error */
2226*7e6ad469SVishal Kulkarni 	no_of_read_words = t4_read_cim_obq(padap, qid,
2227*7e6ad469SVishal Kulkarni 					   (u32 *)((u32 *)scratch_buff.data +
2228*7e6ad469SVishal Kulkarni 					   scratch_buff.offset), qsize);
2229*7e6ad469SVishal Kulkarni 
2230*7e6ad469SVishal Kulkarni 	/* no_of_read_words is less than or equal to 0 means error */
2231*7e6ad469SVishal Kulkarni 	if (no_of_read_words <= 0) {
2232*7e6ad469SVishal Kulkarni 		if (no_of_read_words == 0)
2233*7e6ad469SVishal Kulkarni 			rc = CUDBG_SYSTEM_ERROR;
2234*7e6ad469SVishal Kulkarni 		else
2235*7e6ad469SVishal Kulkarni 			rc = no_of_read_words;
2236*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2237*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2238*7e6ad469SVishal Kulkarni 					 "%s: t4_read_cim_obq failed (%d)\n",
2239*7e6ad469SVishal Kulkarni 				 __func__, rc);
2240*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
2241*7e6ad469SVishal Kulkarni 		goto err1;
2242*7e6ad469SVishal Kulkarni 	}
2243*7e6ad469SVishal Kulkarni 
2244*7e6ad469SVishal Kulkarni 	scratch_buff.size = no_of_read_words * 4;
2245*7e6ad469SVishal Kulkarni 
2246*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2247*7e6ad469SVishal Kulkarni 
2248*7e6ad469SVishal Kulkarni 	if (rc)
2249*7e6ad469SVishal Kulkarni 		goto err1;
2250*7e6ad469SVishal Kulkarni 
2251*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2252*7e6ad469SVishal Kulkarni 
2253*7e6ad469SVishal Kulkarni 	if (rc)
2254*7e6ad469SVishal Kulkarni 		goto err1;
2255*7e6ad469SVishal Kulkarni 
2256*7e6ad469SVishal Kulkarni err1:
2257*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2258*7e6ad469SVishal Kulkarni err:
2259*7e6ad469SVishal Kulkarni 	return rc;
2260*7e6ad469SVishal Kulkarni }
2261*7e6ad469SVishal Kulkarni 
2262*7e6ad469SVishal Kulkarni /* CIM IBQ */
2263*7e6ad469SVishal Kulkarni 
2264*7e6ad469SVishal Kulkarni static int
2265*7e6ad469SVishal Kulkarni collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2266*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
2267*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
2268*7e6ad469SVishal Kulkarni {
2269*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 0;
2270*7e6ad469SVishal Kulkarni 
2271*7e6ad469SVishal Kulkarni 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2272*7e6ad469SVishal Kulkarni 	return rc;
2273*7e6ad469SVishal Kulkarni }
2274*7e6ad469SVishal Kulkarni 
2275*7e6ad469SVishal Kulkarni static int
2276*7e6ad469SVishal Kulkarni collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2277*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
2278*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
2279*7e6ad469SVishal Kulkarni {
2280*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 1;
2281*7e6ad469SVishal Kulkarni 
2282*7e6ad469SVishal Kulkarni 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2283*7e6ad469SVishal Kulkarni 	return rc;
2284*7e6ad469SVishal Kulkarni }
2285*7e6ad469SVishal Kulkarni 
2286*7e6ad469SVishal Kulkarni static int
2287*7e6ad469SVishal Kulkarni collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2288*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
2289*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
2290*7e6ad469SVishal Kulkarni {
2291*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 2;
2292*7e6ad469SVishal Kulkarni 
2293*7e6ad469SVishal Kulkarni 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2294*7e6ad469SVishal Kulkarni 	return rc;
2295*7e6ad469SVishal Kulkarni }
2296*7e6ad469SVishal Kulkarni 
2297*7e6ad469SVishal Kulkarni static int
2298*7e6ad469SVishal Kulkarni collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2299*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2300*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2301*7e6ad469SVishal Kulkarni {
2302*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 3;
2303*7e6ad469SVishal Kulkarni 
2304*7e6ad469SVishal Kulkarni 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2305*7e6ad469SVishal Kulkarni 	return rc;
2306*7e6ad469SVishal Kulkarni }
2307*7e6ad469SVishal Kulkarni 
2308*7e6ad469SVishal Kulkarni static int
2309*7e6ad469SVishal Kulkarni collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2310*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2311*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2312*7e6ad469SVishal Kulkarni {
2313*7e6ad469SVishal Kulkarni 	int rc = 0, qid = 4;
2314*7e6ad469SVishal Kulkarni 
2315*7e6ad469SVishal Kulkarni 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2316*7e6ad469SVishal Kulkarni 	return rc;
2317*7e6ad469SVishal Kulkarni }
2318*7e6ad469SVishal Kulkarni 
2319*7e6ad469SVishal Kulkarni static int
2320*7e6ad469SVishal Kulkarni collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2321*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2322*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2323*7e6ad469SVishal Kulkarni {
2324*7e6ad469SVishal Kulkarni 	int rc, qid = 5;
2325*7e6ad469SVishal Kulkarni 
2326*7e6ad469SVishal Kulkarni 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2327*7e6ad469SVishal Kulkarni 	return rc;
2328*7e6ad469SVishal Kulkarni }
2329*7e6ad469SVishal Kulkarni 
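/*
 * Common helper for the CIM IBQ collectors above.  Reads inbound queue
 * 'qid' (0: TP0, 1: TP1, 2: ULP, 3: SGE0, 4: SGE1, 5: NC-SI) with
 * t4_read_cim_ibq().
 */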
2330*7e6ad469SVishal Kulkarni static int
2331*7e6ad469SVishal Kulkarni read_cim_ibq(struct cudbg_init *pdbg_init,
2332*7e6ad469SVishal Kulkarni 	     struct cudbg_buffer *dbg_buff,
2333*7e6ad469SVishal Kulkarni 	     struct cudbg_error *cudbg_err, int qid)
2334*7e6ad469SVishal Kulkarni {
2335*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2336*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2337*7e6ad469SVishal Kulkarni 	u32 qsize;
2338*7e6ad469SVishal Kulkarni 	int rc;
2339*7e6ad469SVishal Kulkarni 	int no_of_read_words;
2340*7e6ad469SVishal Kulkarni 
2341*7e6ad469SVishal Kulkarni 	/* collect CIM IBQ */
2342*7e6ad469SVishal Kulkarni 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
2343*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2344*7e6ad469SVishal Kulkarni 
2345*7e6ad469SVishal Kulkarni 	if (rc)
2346*7e6ad469SVishal Kulkarni 		goto err;
2347*7e6ad469SVishal Kulkarni 
2348*7e6ad469SVishal Kulkarni 	/* t4_read_cim_ibq will return no. of read words or error */
2349*7e6ad469SVishal Kulkarni 	no_of_read_words = t4_read_cim_ibq(padap, qid,
2350*7e6ad469SVishal Kulkarni 					   (u32 *)((u32 *)scratch_buff.data +
2351*7e6ad469SVishal Kulkarni 					   scratch_buff.offset), qsize);
2352*7e6ad469SVishal Kulkarni 	/* no_of_read_words is less than or equal to 0 means error */
2353*7e6ad469SVishal Kulkarni 	if (no_of_read_words <= 0) {
2354*7e6ad469SVishal Kulkarni 		if (no_of_read_words == 0)
2355*7e6ad469SVishal Kulkarni 			rc = CUDBG_SYSTEM_ERROR;
2356*7e6ad469SVishal Kulkarni 		else
2357*7e6ad469SVishal Kulkarni 			rc = no_of_read_words;
2358*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2359*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2360*7e6ad469SVishal Kulkarni 					 "%s: t4_read_cim_ibq failed (%d)\n",
2361*7e6ad469SVishal Kulkarni 				 __func__, rc);
2362*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
2363*7e6ad469SVishal Kulkarni 		goto err1;
2364*7e6ad469SVishal Kulkarni 	}
2365*7e6ad469SVishal Kulkarni 
2366*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2367*7e6ad469SVishal Kulkarni 	if (rc)
2368*7e6ad469SVishal Kulkarni 		goto err1;
2369*7e6ad469SVishal Kulkarni 
2370*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2371*7e6ad469SVishal Kulkarni 	if (rc)
2372*7e6ad469SVishal Kulkarni 		goto err1;
2373*7e6ad469SVishal Kulkarni 
2374*7e6ad469SVishal Kulkarni err1:
2375*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2376*7e6ad469SVishal Kulkarni 
2377*7e6ad469SVishal Kulkarni err:
2378*7e6ad469SVishal Kulkarni 	return rc;
2379*7e6ad469SVishal Kulkarni }
2380*7e6ad469SVishal Kulkarni 
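/*
 * Collect the CIM MA (memory arbiter) logic-analyzer contents via
 * t4_cim_read_ma_la(), which fills the two halves of the scratch buffer
 * directly.
 */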
2381*7e6ad469SVishal Kulkarni static int
2382*7e6ad469SVishal Kulkarni collect_cim_ma_la(struct cudbg_init *pdbg_init,
2383*7e6ad469SVishal Kulkarni 		  struct cudbg_buffer *dbg_buff,
2384*7e6ad469SVishal Kulkarni 		  struct cudbg_error *cudbg_err)
2385*7e6ad469SVishal Kulkarni {
2386*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2387*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2388*7e6ad469SVishal Kulkarni 	int rc = 0;
2389*7e6ad469SVishal Kulkarni 
2390*7e6ad469SVishal Kulkarni 	/* collect CIM MA LA */
2391*7e6ad469SVishal Kulkarni 	scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2392*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2393*7e6ad469SVishal Kulkarni 	if (rc)
2394*7e6ad469SVishal Kulkarni 		goto err;
2395*7e6ad469SVishal Kulkarni 
2396*7e6ad469SVishal Kulkarni 	/* no return */
2397*7e6ad469SVishal Kulkarni 	t4_cim_read_ma_la(padap,
2398*7e6ad469SVishal Kulkarni 			  (u32 *) ((char *)scratch_buff.data +
2399*7e6ad469SVishal Kulkarni 				   scratch_buff.offset),
2400*7e6ad469SVishal Kulkarni 			  (u32 *) ((char *)scratch_buff.data +
2401*7e6ad469SVishal Kulkarni 				   scratch_buff.offset + 5 * CIM_MALA_SIZE));
2402*7e6ad469SVishal Kulkarni 
2403*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2404*7e6ad469SVishal Kulkarni 	if (rc)
2405*7e6ad469SVishal Kulkarni 		goto err1;
2406*7e6ad469SVishal Kulkarni 
2407*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2408*7e6ad469SVishal Kulkarni 
2409*7e6ad469SVishal Kulkarni err1:
2410*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2411*7e6ad469SVishal Kulkarni err:
2412*7e6ad469SVishal Kulkarni 	return rc;
2413*7e6ad469SVishal Kulkarni }
2414*7e6ad469SVishal Kulkarni 
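/*
 * Collect the CIM logic-analyzer capture.  The UP_UP_DBG_LA_CFG register
 * is saved first, followed by the LA data read with t4_cim_read_la().
 * The scratch buffer is sized for the chip-specific record layout (T6
 * uses a different format than T4/T5).
 */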
2415*7e6ad469SVishal Kulkarni static int
2416*7e6ad469SVishal Kulkarni collect_cim_la(struct cudbg_init *pdbg_init,
2417*7e6ad469SVishal Kulkarni 	       struct cudbg_buffer *dbg_buff,
2418*7e6ad469SVishal Kulkarni 	       struct cudbg_error *cudbg_err)
2419*7e6ad469SVishal Kulkarni {
2420*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2421*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2422*7e6ad469SVishal Kulkarni 
2423*7e6ad469SVishal Kulkarni 	int rc;
2424*7e6ad469SVishal Kulkarni 	u32 cfg = 0;
2425*7e6ad469SVishal Kulkarni 	int size;
2426*7e6ad469SVishal Kulkarni 
2427*7e6ad469SVishal Kulkarni 	/* collect CIM LA */
2428*7e6ad469SVishal Kulkarni 	if (is_t6(padap->params.chip)) {
2429*7e6ad469SVishal Kulkarni 		size = padap->params.cim_la_size / 10 + 1;
2430*7e6ad469SVishal Kulkarni 		size *= 11 * sizeof(u32);
2431*7e6ad469SVishal Kulkarni 	} else {
2432*7e6ad469SVishal Kulkarni 		size = padap->params.cim_la_size / 8;
2433*7e6ad469SVishal Kulkarni 		size *= 8 * sizeof(u32);
2434*7e6ad469SVishal Kulkarni 	}
2435*7e6ad469SVishal Kulkarni 
2436*7e6ad469SVishal Kulkarni 	size += sizeof(cfg);
2437*7e6ad469SVishal Kulkarni 
2438*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2439*7e6ad469SVishal Kulkarni 	if (rc)
2440*7e6ad469SVishal Kulkarni 		goto err;
2441*7e6ad469SVishal Kulkarni 
2442*7e6ad469SVishal Kulkarni 	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2443*7e6ad469SVishal Kulkarni 
2444*7e6ad469SVishal Kulkarni 	if (rc) {
2445*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2446*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2447*7e6ad469SVishal Kulkarni 					 "%s: t4_cim_read failed (%d)\n",
2448*7e6ad469SVishal Kulkarni 				 __func__, rc);
2449*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
2450*7e6ad469SVishal Kulkarni 		goto err1;
2451*7e6ad469SVishal Kulkarni 	}
2452*7e6ad469SVishal Kulkarni 
2453*7e6ad469SVishal Kulkarni 	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2454*7e6ad469SVishal Kulkarni 	       sizeof(cfg));
2455*7e6ad469SVishal Kulkarni 
2456*7e6ad469SVishal Kulkarni 	rc = t4_cim_read_la(padap,
2457*7e6ad469SVishal Kulkarni 			    (u32 *) ((char *)scratch_buff.data +
2458*7e6ad469SVishal Kulkarni 				     scratch_buff.offset + sizeof(cfg)), NULL);
2459*7e6ad469SVishal Kulkarni 	if (rc < 0) {
2460*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2461*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2462*7e6ad469SVishal Kulkarni 					 "%s: t4_cim_read_la failed (%d)\n",
2463*7e6ad469SVishal Kulkarni 				 __func__, rc);
2464*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
2465*7e6ad469SVishal Kulkarni 		goto err1;
2466*7e6ad469SVishal Kulkarni 	}
2467*7e6ad469SVishal Kulkarni 
2468*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2469*7e6ad469SVishal Kulkarni 	if (rc)
2470*7e6ad469SVishal Kulkarni 		goto err1;
2471*7e6ad469SVishal Kulkarni 
2472*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2473*7e6ad469SVishal Kulkarni 	if (rc)
2474*7e6ad469SVishal Kulkarni 		goto err1;
2475*7e6ad469SVishal Kulkarni 
2476*7e6ad469SVishal Kulkarni err1:
2477*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2478*7e6ad469SVishal Kulkarni err:
2479*7e6ad469SVishal Kulkarni 	return rc;
2480*7e6ad469SVishal Kulkarni }
2481*7e6ad469SVishal Kulkarni 
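/*
 * Collect the CIM queue configuration: IBQ/OBQ pointer state via
 * t4_cim_read() and the queue base/size/threshold arrays via
 * t4_read_cimq_cfg().
 */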
2482*7e6ad469SVishal Kulkarni static int
2483*7e6ad469SVishal Kulkarni collect_cim_qcfg(struct cudbg_init *pdbg_init,
2484*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
2485*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
2486*7e6ad469SVishal Kulkarni {
2487*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2488*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2489*7e6ad469SVishal Kulkarni 	u32 offset;
2490*7e6ad469SVishal Kulkarni 	int rc = 0;
2491*7e6ad469SVishal Kulkarni 
2492*7e6ad469SVishal Kulkarni 	struct struct_cim_qcfg *cim_qcfg_data = NULL;
2493*7e6ad469SVishal Kulkarni 
2494*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2495*7e6ad469SVishal Kulkarni 			      &scratch_buff);
2496*7e6ad469SVishal Kulkarni 
2497*7e6ad469SVishal Kulkarni 	if (rc)
2498*7e6ad469SVishal Kulkarni 		goto err;
2499*7e6ad469SVishal Kulkarni 
2500*7e6ad469SVishal Kulkarni 	offset = scratch_buff.offset;
2501*7e6ad469SVishal Kulkarni 
2502*7e6ad469SVishal Kulkarni 	cim_qcfg_data =
2503*7e6ad469SVishal Kulkarni 		(struct struct_cim_qcfg *)((char *)scratch_buff.data + offset);
2505*7e6ad469SVishal Kulkarni 
2506*7e6ad469SVishal Kulkarni 	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2507*7e6ad469SVishal Kulkarni 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2508*7e6ad469SVishal Kulkarni 
2509*7e6ad469SVishal Kulkarni 	if (rc) {
2510*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2511*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2512*7e6ad469SVishal Kulkarni 					 "%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2513*7e6ad469SVishal Kulkarni 			    __func__, rc);
2514*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
2515*7e6ad469SVishal Kulkarni 		goto err1;
2516*7e6ad469SVishal Kulkarni 	}
2517*7e6ad469SVishal Kulkarni 
2518*7e6ad469SVishal Kulkarni 	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2519*7e6ad469SVishal Kulkarni 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
2520*7e6ad469SVishal Kulkarni 			 cim_qcfg_data->obq_wr);
2521*7e6ad469SVishal Kulkarni 
2522*7e6ad469SVishal Kulkarni 	if (rc) {
2523*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2524*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
2525*7e6ad469SVishal Kulkarni 					 "%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2526*7e6ad469SVishal Kulkarni 			    __func__, rc);
2527*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
2528*7e6ad469SVishal Kulkarni 		goto err1;
2529*7e6ad469SVishal Kulkarni 	}
2530*7e6ad469SVishal Kulkarni 
2531*7e6ad469SVishal Kulkarni 	/* no return val */
2532*7e6ad469SVishal Kulkarni 	t4_read_cimq_cfg(padap,
2533*7e6ad469SVishal Kulkarni 			cim_qcfg_data->base,
2534*7e6ad469SVishal Kulkarni 			cim_qcfg_data->size,
2535*7e6ad469SVishal Kulkarni 			cim_qcfg_data->thres);
2536*7e6ad469SVishal Kulkarni 
2537*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2538*7e6ad469SVishal Kulkarni 	if (rc)
2539*7e6ad469SVishal Kulkarni 		goto err1;
2540*7e6ad469SVishal Kulkarni 
2541*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2542*7e6ad469SVishal Kulkarni 	if (rc)
2543*7e6ad469SVishal Kulkarni 		goto err1;
2544*7e6ad469SVishal Kulkarni 
2545*7e6ad469SVishal Kulkarni err1:
2546*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2547*7e6ad469SVishal Kulkarni err:
2548*7e6ad469SVishal Kulkarni 	return rc;
2549*7e6ad469SVishal Kulkarni }
2550*7e6ad469SVishal Kulkarni 
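/*
 * Copy 'tot_len' bytes of the given adapter memory (EDC0/EDC1/MC0/MC1)
 * into the dump.  The memory is read through the NIC memory window in
 * CUDBG_CHUNK_SIZE pieces, each compressed separately.
 */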
2551*7e6ad469SVishal Kulkarni static int
2552*7e6ad469SVishal Kulkarni read_fw_mem(struct cudbg_init *pdbg_init,
2553*7e6ad469SVishal Kulkarni 	    struct cudbg_buffer *dbg_buff, u8 mem_type,
2554*7e6ad469SVishal Kulkarni 	    unsigned long tot_len, struct cudbg_error *cudbg_err)
2555*7e6ad469SVishal Kulkarni {
2556*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2557*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2558*7e6ad469SVishal Kulkarni 	unsigned long bytes_read = 0;
2559*7e6ad469SVishal Kulkarni 	unsigned long bytes_left;
2560*7e6ad469SVishal Kulkarni 	unsigned long bytes;
2561*7e6ad469SVishal Kulkarni 	int	      rc;
2562*7e6ad469SVishal Kulkarni 
2563*7e6ad469SVishal Kulkarni 	bytes_left = tot_len;
2564*7e6ad469SVishal Kulkarni 	scratch_buff.size = tot_len;
2565*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2566*7e6ad469SVishal Kulkarni 	if (rc)
2567*7e6ad469SVishal Kulkarni 		goto err;
2568*7e6ad469SVishal Kulkarni 
2569*7e6ad469SVishal Kulkarni 	while (bytes_left > 0) {
2570*7e6ad469SVishal Kulkarni 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2571*7e6ad469SVishal Kulkarni 		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2572*7e6ad469SVishal Kulkarni 
2573*7e6ad469SVishal Kulkarni 		if (rc) {
2574*7e6ad469SVishal Kulkarni 			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2575*7e6ad469SVishal Kulkarni 			goto err;
2576*7e6ad469SVishal Kulkarni 		}
2577*7e6ad469SVishal Kulkarni 
2578*7e6ad469SVishal Kulkarni 		/* Read the next chunk from adapter memory */
2580*7e6ad469SVishal Kulkarni 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2581*7e6ad469SVishal Kulkarni 				  bytes, (__be32 *)(scratch_buff.data), 1);
2582*7e6ad469SVishal Kulkarni 
2583*7e6ad469SVishal Kulkarni 		if (rc) {
2584*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
2585*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
2586*7e6ad469SVishal Kulkarni 						 "%s: t4_memory_rw failed (%d)",
2587*7e6ad469SVishal Kulkarni 				    __func__, rc);
2588*7e6ad469SVishal Kulkarni 			cudbg_err->sys_err = rc;
2589*7e6ad469SVishal Kulkarni 			goto err1;
2590*7e6ad469SVishal Kulkarni 		}
2591*7e6ad469SVishal Kulkarni 
2592*7e6ad469SVishal Kulkarni 		rc = compress_buff(&scratch_buff, dbg_buff);
2593*7e6ad469SVishal Kulkarni 		if (rc)
2594*7e6ad469SVishal Kulkarni 			goto err1;
2595*7e6ad469SVishal Kulkarni 
2596*7e6ad469SVishal Kulkarni 		bytes_left -= bytes;
2597*7e6ad469SVishal Kulkarni 		bytes_read += bytes;
2598*7e6ad469SVishal Kulkarni 		release_scratch_buff(&scratch_buff, dbg_buff);
2599*7e6ad469SVishal Kulkarni 	}
2600*7e6ad469SVishal Kulkarni 
2601*7e6ad469SVishal Kulkarni err1:
2602*7e6ad469SVishal Kulkarni 	if (rc)
2603*7e6ad469SVishal Kulkarni 		release_scratch_buff(&scratch_buff, dbg_buff);
2604*7e6ad469SVishal Kulkarni 
2605*7e6ad469SVishal Kulkarni err:
2606*7e6ad469SVishal Kulkarni 	return rc;
2607*7e6ad469SVishal Kulkarni }
2608*7e6ad469SVishal Kulkarni 
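/*
 * Determine which on-chip/external memories are present.  The EDC/MC
 * sizes (in MB) come from the MA BAR registers and the enable bits from
 * A_MA_TARGET_MEM_ENABLE.  T4 has a single external memory; T5/T6 expose
 * MC0 and MC1 separately.
 */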
2609*7e6ad469SVishal Kulkarni static void
2610*7e6ad469SVishal Kulkarni collect_mem_info(struct cudbg_init *pdbg_init,
2611*7e6ad469SVishal Kulkarni 		 struct card_mem *mem_info)
2612*7e6ad469SVishal Kulkarni {
2613*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2614*7e6ad469SVishal Kulkarni 	u32 value;
2615*7e6ad469SVishal Kulkarni 	int t4 = 0;
2616*7e6ad469SVishal Kulkarni 
2617*7e6ad469SVishal Kulkarni 	if (is_t4(padap->params.chip))
2618*7e6ad469SVishal Kulkarni 		t4 = 1;
2619*7e6ad469SVishal Kulkarni 
2620*7e6ad469SVishal Kulkarni 	if (t4) {
2621*7e6ad469SVishal Kulkarni 		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2622*7e6ad469SVishal Kulkarni 		value = G_EXT_MEM_SIZE(value);
2623*7e6ad469SVishal Kulkarni 		mem_info->size_mc0 = (u16)value;  /* size in MB */
2624*7e6ad469SVishal Kulkarni 
2625*7e6ad469SVishal Kulkarni 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2626*7e6ad469SVishal Kulkarni 		if (value & F_EXT_MEM_ENABLE)
2627*7e6ad469SVishal Kulkarni 			mem_info->mem_flag |= (1 << MC0_FLAG); /* set mc0 flag
2628*7e6ad469SVishal Kulkarni 								  bit */
2629*7e6ad469SVishal Kulkarni 	} else {
2630*7e6ad469SVishal Kulkarni 		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2631*7e6ad469SVishal Kulkarni 		value = G_EXT_MEM0_SIZE(value);
2632*7e6ad469SVishal Kulkarni 		mem_info->size_mc0 = (u16)value;
2633*7e6ad469SVishal Kulkarni 
2634*7e6ad469SVishal Kulkarni 		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2635*7e6ad469SVishal Kulkarni 		value = G_EXT_MEM1_SIZE(value);
2636*7e6ad469SVishal Kulkarni 		mem_info->size_mc1 = (u16)value;
2637*7e6ad469SVishal Kulkarni 
2638*7e6ad469SVishal Kulkarni 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2639*7e6ad469SVishal Kulkarni 		if (value & F_EXT_MEM0_ENABLE)
2640*7e6ad469SVishal Kulkarni 			mem_info->mem_flag |= (1 << MC0_FLAG);
2641*7e6ad469SVishal Kulkarni 		if (value & F_EXT_MEM1_ENABLE)
2642*7e6ad469SVishal Kulkarni 			mem_info->mem_flag |= (1 << MC1_FLAG);
2643*7e6ad469SVishal Kulkarni 	}
2644*7e6ad469SVishal Kulkarni 
2645*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2646*7e6ad469SVishal Kulkarni 	value = G_EDRAM0_SIZE(value);
2647*7e6ad469SVishal Kulkarni 	mem_info->size_edc0 = (u16)value;
2648*7e6ad469SVishal Kulkarni 
2649*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2650*7e6ad469SVishal Kulkarni 	value = G_EDRAM1_SIZE(value);
2651*7e6ad469SVishal Kulkarni 	mem_info->size_edc1 = (u16)value;
2652*7e6ad469SVishal Kulkarni 
2653*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2654*7e6ad469SVishal Kulkarni 	if (value & F_EDRAM0_ENABLE)
2655*7e6ad469SVishal Kulkarni 		mem_info->mem_flag |= (1 << EDC0_FLAG);
2656*7e6ad469SVishal Kulkarni 	if (value & F_EDRAM1_ENABLE)
2657*7e6ad469SVishal Kulkarni 		mem_info->mem_flag |= (1 << EDC1_FLAG);
2658*7e6ad469SVishal Kulkarni 
2659*7e6ad469SVishal Kulkarni }
2660*7e6ad469SVishal Kulkarni 
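/*
 * Ask the firmware to flush the uP dcache so that the EDC/MC reads that
 * follow see up-to-date contents.  A failure is recorded as a warning
 * and does not abort collection.
 */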
2661*7e6ad469SVishal Kulkarni static void
2662*7e6ad469SVishal Kulkarni cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2663*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
2664*7e6ad469SVishal Kulkarni {
2665*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2666*7e6ad469SVishal Kulkarni 	int rc;
2667*7e6ad469SVishal Kulkarni 
2668*7e6ad469SVishal Kulkarni 	if (is_fw_attached(pdbg_init)) {
2669*7e6ad469SVishal Kulkarni 
2670*7e6ad469SVishal Kulkarni 		/* Flush uP dcache before reading edcX/mcX  */
2671*7e6ad469SVishal Kulkarni 		rc = begin_synchronized_op(padap->port[0], 1, 1);
2672*7e6ad469SVishal Kulkarni 		if (rc == 0) {
2673*7e6ad469SVishal Kulkarni 			rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2674*7e6ad469SVishal Kulkarni 			end_synchronized_op(padap->port[0], 1);
2675*7e6ad469SVishal Kulkarni 		}
2676*7e6ad469SVishal Kulkarni 
2677*7e6ad469SVishal Kulkarni 		if (rc) {
2678*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
2679*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
2680*7e6ad469SVishal Kulkarni 						 "%s: t4_fwcache failed (%d)\n",
2681*7e6ad469SVishal Kulkarni 				 __func__, rc);
2682*7e6ad469SVishal Kulkarni 			cudbg_err->sys_warn = rc;
2683*7e6ad469SVishal Kulkarni 		}
2684*7e6ad469SVishal Kulkarni 	}
2685*7e6ad469SVishal Kulkarni }
2686*7e6ad469SVishal Kulkarni 
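/*
 * Dump the whole of EDC0 if it is enabled.  The EDC1/MC0/MC1 collectors
 * below follow the same pattern.
 */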
2687*7e6ad469SVishal Kulkarni static int
2688*7e6ad469SVishal Kulkarni collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2689*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2690*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2691*7e6ad469SVishal Kulkarni {
2692*7e6ad469SVishal Kulkarni 	struct card_mem mem_info = {0};
2693*7e6ad469SVishal Kulkarni 	unsigned long edc0_size;
2694*7e6ad469SVishal Kulkarni 	int rc;
2695*7e6ad469SVishal Kulkarni 
2696*7e6ad469SVishal Kulkarni 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2697*7e6ad469SVishal Kulkarni 
2698*7e6ad469SVishal Kulkarni 	collect_mem_info(pdbg_init, &mem_info);
2699*7e6ad469SVishal Kulkarni 
2700*7e6ad469SVishal Kulkarni 	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2701*7e6ad469SVishal Kulkarni 		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2702*7e6ad469SVishal Kulkarni 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2703*7e6ad469SVishal Kulkarni 				 edc0_size, cudbg_err);
2704*7e6ad469SVishal Kulkarni 		if (rc)
2705*7e6ad469SVishal Kulkarni 			goto err;
2706*7e6ad469SVishal Kulkarni 
2707*7e6ad469SVishal Kulkarni 	} else {
2708*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2709*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2710*7e6ad469SVishal Kulkarni 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2711*7e6ad469SVishal Kulkarni 					 "%s(), collect_mem_info failed!, %s\n",
2712*7e6ad469SVishal Kulkarni 				 __func__, err_msg[-rc]);
2713*7e6ad469SVishal Kulkarni 		goto err;
2714*7e6ad469SVishal Kulkarni 
2715*7e6ad469SVishal Kulkarni 	}
2716*7e6ad469SVishal Kulkarni err:
2717*7e6ad469SVishal Kulkarni 	return rc;
2718*7e6ad469SVishal Kulkarni }
2719*7e6ad469SVishal Kulkarni 
2720*7e6ad469SVishal Kulkarni static int
2721*7e6ad469SVishal Kulkarni collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2722*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
2723*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
2724*7e6ad469SVishal Kulkarni {
2725*7e6ad469SVishal Kulkarni 	struct card_mem mem_info = {0};
2726*7e6ad469SVishal Kulkarni 	unsigned long edc1_size;
2727*7e6ad469SVishal Kulkarni 	int rc;
2728*7e6ad469SVishal Kulkarni 
2729*7e6ad469SVishal Kulkarni 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2730*7e6ad469SVishal Kulkarni 
2731*7e6ad469SVishal Kulkarni 	collect_mem_info(pdbg_init, &mem_info);
2732*7e6ad469SVishal Kulkarni 
2733*7e6ad469SVishal Kulkarni 	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2734*7e6ad469SVishal Kulkarni 		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2735*7e6ad469SVishal Kulkarni 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2736*7e6ad469SVishal Kulkarni 				 edc1_size, cudbg_err);
2737*7e6ad469SVishal Kulkarni 		if (rc)
2738*7e6ad469SVishal Kulkarni 			goto err;
2739*7e6ad469SVishal Kulkarni 	} else {
2740*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2741*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2742*7e6ad469SVishal Kulkarni 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2743*7e6ad469SVishal Kulkarni 					 "%s(), collect_mem_info failed!, %s\n",
2744*7e6ad469SVishal Kulkarni 				 __func__, err_msg[-rc]);
2745*7e6ad469SVishal Kulkarni 		goto err;
2746*7e6ad469SVishal Kulkarni 	}
2747*7e6ad469SVishal Kulkarni 
2748*7e6ad469SVishal Kulkarni err:
2749*7e6ad469SVishal Kulkarni 
2750*7e6ad469SVishal Kulkarni 	return rc;
2751*7e6ad469SVishal Kulkarni }
2752*7e6ad469SVishal Kulkarni 
2753*7e6ad469SVishal Kulkarni static int
2754*7e6ad469SVishal Kulkarni collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2755*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
2756*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
2757*7e6ad469SVishal Kulkarni {
2758*7e6ad469SVishal Kulkarni 	struct card_mem mem_info = {0};
2759*7e6ad469SVishal Kulkarni 	unsigned long mc0_size;
2760*7e6ad469SVishal Kulkarni 	int rc;
2761*7e6ad469SVishal Kulkarni 
2762*7e6ad469SVishal Kulkarni 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2763*7e6ad469SVishal Kulkarni 
2764*7e6ad469SVishal Kulkarni 	collect_mem_info(pdbg_init, &mem_info);
2765*7e6ad469SVishal Kulkarni 
2766*7e6ad469SVishal Kulkarni 	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2767*7e6ad469SVishal Kulkarni 		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2768*7e6ad469SVishal Kulkarni 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2769*7e6ad469SVishal Kulkarni 				 mc0_size, cudbg_err);
2770*7e6ad469SVishal Kulkarni 		if (rc)
2771*7e6ad469SVishal Kulkarni 			goto err;
2772*7e6ad469SVishal Kulkarni 	} else {
2773*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2774*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2775*7e6ad469SVishal Kulkarni 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2776*7e6ad469SVishal Kulkarni 					 "%s(), collect_mem_info failed!, %s\n",
2777*7e6ad469SVishal Kulkarni 				 __func__, err_msg[-rc]);
2778*7e6ad469SVishal Kulkarni 		goto err;
2779*7e6ad469SVishal Kulkarni 	}
2780*7e6ad469SVishal Kulkarni 
2781*7e6ad469SVishal Kulkarni err:
2782*7e6ad469SVishal Kulkarni 	return rc;
2783*7e6ad469SVishal Kulkarni }
2784*7e6ad469SVishal Kulkarni 
2785*7e6ad469SVishal Kulkarni static int
2786*7e6ad469SVishal Kulkarni collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2787*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
2788*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
2789*7e6ad469SVishal Kulkarni {
2790*7e6ad469SVishal Kulkarni 	struct card_mem mem_info = {0};
2791*7e6ad469SVishal Kulkarni 	unsigned long mc1_size;
2792*7e6ad469SVishal Kulkarni 	int rc;
2793*7e6ad469SVishal Kulkarni 
2794*7e6ad469SVishal Kulkarni 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2795*7e6ad469SVishal Kulkarni 
2796*7e6ad469SVishal Kulkarni 	collect_mem_info(pdbg_init, &mem_info);
2797*7e6ad469SVishal Kulkarni 
2798*7e6ad469SVishal Kulkarni 	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2799*7e6ad469SVishal Kulkarni 		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2800*7e6ad469SVishal Kulkarni 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2801*7e6ad469SVishal Kulkarni 				 mc1_size, cudbg_err);
2802*7e6ad469SVishal Kulkarni 		if (rc)
2803*7e6ad469SVishal Kulkarni 			goto err;
2804*7e6ad469SVishal Kulkarni 	} else {
2805*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2806*7e6ad469SVishal Kulkarni 
2807*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
2808*7e6ad469SVishal Kulkarni 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2809*7e6ad469SVishal Kulkarni 					"%s(), collect_mem_info failed!, %s\n",
2810*7e6ad469SVishal Kulkarni 				 __func__, err_msg[-rc]);
2811*7e6ad469SVishal Kulkarni 		goto err;
2812*7e6ad469SVishal Kulkarni 	}
2813*7e6ad469SVishal Kulkarni err:
2814*7e6ad469SVishal Kulkarni 	return rc;
2815*7e6ad469SVishal Kulkarni }
2816*7e6ad469SVishal Kulkarni 
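/*
 * Snapshot the full register map with t4_get_regs() and write it out in
 * CUDBG_CHUNK_SIZE pieces.
 */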
2817*7e6ad469SVishal Kulkarni static int
2818*7e6ad469SVishal Kulkarni collect_reg_dump(struct cudbg_init *pdbg_init,
2819*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
2820*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
2821*7e6ad469SVishal Kulkarni {
2822*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2823*7e6ad469SVishal Kulkarni 	struct cudbg_buffer tmp_scratch_buff;
2824*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2825*7e6ad469SVishal Kulkarni 	unsigned long	     bytes_read = 0;
2826*7e6ad469SVishal Kulkarni 	unsigned long	     bytes_left;
2827*7e6ad469SVishal Kulkarni 	u32		     buf_size = 0, bytes = 0;
2828*7e6ad469SVishal Kulkarni 	int		     rc = 0;
2829*7e6ad469SVishal Kulkarni 
2830*7e6ad469SVishal Kulkarni 	if (is_t4(padap->params.chip))
2831*7e6ad469SVishal Kulkarni 		buf_size = T4_REGMAP_SIZE;
2832*7e6ad469SVishal Kulkarni 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
2833*7e6ad469SVishal Kulkarni 		buf_size = T5_REGMAP_SIZE;
2834*7e6ad469SVishal Kulkarni 
2835*7e6ad469SVishal Kulkarni 	scratch_buff.size = buf_size;
2836*7e6ad469SVishal Kulkarni 
2837*7e6ad469SVishal Kulkarni 	tmp_scratch_buff = scratch_buff;
2838*7e6ad469SVishal Kulkarni 
2839*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2840*7e6ad469SVishal Kulkarni 	if (rc)
2841*7e6ad469SVishal Kulkarni 		goto err;
2842*7e6ad469SVishal Kulkarni 
2843*7e6ad469SVishal Kulkarni 	/* no return */
2844*7e6ad469SVishal Kulkarni 	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2845*7e6ad469SVishal Kulkarni 	bytes_left = scratch_buff.size;
2846*7e6ad469SVishal Kulkarni 
2847*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2848*7e6ad469SVishal Kulkarni 	if (rc)
2849*7e6ad469SVishal Kulkarni 		goto err1;
2850*7e6ad469SVishal Kulkarni 
2851*7e6ad469SVishal Kulkarni 	while (bytes_left > 0) {
2852*7e6ad469SVishal Kulkarni 		tmp_scratch_buff.data =
2853*7e6ad469SVishal Kulkarni 			((char *)scratch_buff.data) + bytes_read;
2854*7e6ad469SVishal Kulkarni 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2855*7e6ad469SVishal Kulkarni 		tmp_scratch_buff.size = bytes;
2856*7e6ad469SVishal Kulkarni 		rc = compress_buff(&tmp_scratch_buff, dbg_buff);
		if (rc)
			goto err1;
2857*7e6ad469SVishal Kulkarni 		bytes_left -= bytes;
2858*7e6ad469SVishal Kulkarni 		bytes_read += bytes;
2859*7e6ad469SVishal Kulkarni 	}
2860*7e6ad469SVishal Kulkarni 
2861*7e6ad469SVishal Kulkarni err1:
2862*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2863*7e6ad469SVishal Kulkarni err:
2864*7e6ad469SVishal Kulkarni 	return rc;
2865*7e6ad469SVishal Kulkarni }
2866*7e6ad469SVishal Kulkarni 
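/*
 * Collect the congestion control table (NCCTRL_WIN windows of NMTUS
 * 16-bit entries) read with t4_read_cong_tbl().
 */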
2867*7e6ad469SVishal Kulkarni static int
2868*7e6ad469SVishal Kulkarni collect_cctrl(struct cudbg_init *pdbg_init,
2869*7e6ad469SVishal Kulkarni 	      struct cudbg_buffer *dbg_buff,
2870*7e6ad469SVishal Kulkarni 	      struct cudbg_error *cudbg_err)
2871*7e6ad469SVishal Kulkarni {
2872*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2873*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2874*7e6ad469SVishal Kulkarni 	u32 size;
2875*7e6ad469SVishal Kulkarni 	int rc;
2876*7e6ad469SVishal Kulkarni 
2877*7e6ad469SVishal Kulkarni 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2878*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
2879*7e6ad469SVishal Kulkarni 
2880*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2881*7e6ad469SVishal Kulkarni 	if (rc)
2882*7e6ad469SVishal Kulkarni 		goto err;
2883*7e6ad469SVishal Kulkarni 
2884*7e6ad469SVishal Kulkarni 	t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2885*7e6ad469SVishal Kulkarni 
2886*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2887*7e6ad469SVishal Kulkarni 	if (rc)
2888*7e6ad469SVishal Kulkarni 		goto err1;
2889*7e6ad469SVishal Kulkarni 
2890*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
2891*7e6ad469SVishal Kulkarni 
2892*7e6ad469SVishal Kulkarni err1:
2893*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
2894*7e6ad469SVishal Kulkarni err:
2895*7e6ad469SVishal Kulkarni 	return rc;
2896*7e6ad469SVishal Kulkarni }
2897*7e6ad469SVishal Kulkarni 
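/*
 * Poll A_CIM_HOST_ACC_CTRL until the busy bit clears, giving up after
 * 'retry' reads.  Returns 0 when the interface is idle, -1 on timeout.
 */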
2898*7e6ad469SVishal Kulkarni static int
2899*7e6ad469SVishal Kulkarni check_busy_bit(struct adapter *padap)
2900*7e6ad469SVishal Kulkarni {
2901*7e6ad469SVishal Kulkarni 	u32 val;
2902*7e6ad469SVishal Kulkarni 	u32 busy = 1;
2903*7e6ad469SVishal Kulkarni 	int i = 0;
2904*7e6ad469SVishal Kulkarni 	int retry = 10;
2905*7e6ad469SVishal Kulkarni 	int status = 0;
2906*7e6ad469SVishal Kulkarni 
2907*7e6ad469SVishal Kulkarni 	while (busy && (i < retry)) {
2908*7e6ad469SVishal Kulkarni 		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2909*7e6ad469SVishal Kulkarni 		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2910*7e6ad469SVishal Kulkarni 		i++;
2911*7e6ad469SVishal Kulkarni 	}
2912*7e6ad469SVishal Kulkarni 
2913*7e6ad469SVishal Kulkarni 	if (busy)
2914*7e6ad469SVishal Kulkarni 		status = -1;
2915*7e6ad469SVishal Kulkarni 
2916*7e6ad469SVishal Kulkarni 	return status;
2917*7e6ad469SVishal Kulkarni }
2918*7e6ad469SVishal Kulkarni 
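/*
 * Indirect read through the CIM host-access interface: write the target
 * address to A_CIM_HOST_ACC_CTRL, wait for the busy bit to clear, then
 * fetch the value from A_CIM_HOST_ACC_DATA.
 */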
2919*7e6ad469SVishal Kulkarni static int
2920*7e6ad469SVishal Kulkarni cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2921*7e6ad469SVishal Kulkarni {
2922*7e6ad469SVishal Kulkarni 	int rc = 0;
2923*7e6ad469SVishal Kulkarni 
2924*7e6ad469SVishal Kulkarni 	/* write register address into the A_CIM_HOST_ACC_CTRL */
2925*7e6ad469SVishal Kulkarni 	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2926*7e6ad469SVishal Kulkarni 
2927*7e6ad469SVishal Kulkarni 	/* Poll HOSTBUSY */
2928*7e6ad469SVishal Kulkarni 	rc = check_busy_bit(padap);
2929*7e6ad469SVishal Kulkarni 	if (rc)
2930*7e6ad469SVishal Kulkarni 		goto err;
2931*7e6ad469SVishal Kulkarni 
2932*7e6ad469SVishal Kulkarni 	/* Read value from A_CIM_HOST_ACC_DATA */
2933*7e6ad469SVishal Kulkarni 	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2934*7e6ad469SVishal Kulkarni 
2935*7e6ad469SVishal Kulkarni err:
2936*7e6ad469SVishal Kulkarni 	return rc;
2937*7e6ad469SVishal Kulkarni }
2938*7e6ad469SVishal Kulkarni 
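/*
 * Read 'ireg_offset_range' consecutive 32-bit words starting at
 * 'ireg_local_offset' through the CIM host-access interface into 'buff'.
 */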
2939*7e6ad469SVishal Kulkarni static int
2940*7e6ad469SVishal Kulkarni dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2941*7e6ad469SVishal Kulkarni 	    struct ireg_field *up_cim_reg, u32 *buff)
2942*7e6ad469SVishal Kulkarni {
2943*7e6ad469SVishal Kulkarni 	u32 i;
2944*7e6ad469SVishal Kulkarni 	int rc = 0;
2945*7e6ad469SVishal Kulkarni 
2946*7e6ad469SVishal Kulkarni 	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2947*7e6ad469SVishal Kulkarni 		rc = cim_ha_rreg(padap,
2948*7e6ad469SVishal Kulkarni 				 up_cim_reg->ireg_local_offset + (i * 4),
2949*7e6ad469SVishal Kulkarni 				buff);
2950*7e6ad469SVishal Kulkarni 		if (rc) {
2951*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
2952*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
2953*7e6ad469SVishal Kulkarni 						 "BUSY timeout reading "
2954*7e6ad469SVishal Kulkarni 					 "CIM_HOST_ACC_CTRL\n");
2955*7e6ad469SVishal Kulkarni 			goto err;
2956*7e6ad469SVishal Kulkarni 		}
2957*7e6ad469SVishal Kulkarni 
2958*7e6ad469SVishal Kulkarni 		buff++;
2959*7e6ad469SVishal Kulkarni 	}
2960*7e6ad469SVishal Kulkarni 
2961*7e6ad469SVishal Kulkarni err:
2962*7e6ad469SVishal Kulkarni 	return rc;
2963*7e6ad469SVishal Kulkarni }
2964*7e6ad469SVishal Kulkarni 
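/*
 * Dump the uP CIM indirect register ranges described by
 * t5_up_cim_reg_array/t6_up_cim_reg_array, one ireg_buf per range.
 */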
2965*7e6ad469SVishal Kulkarni static int
2966*7e6ad469SVishal Kulkarni collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2967*7e6ad469SVishal Kulkarni 			struct cudbg_buffer *dbg_buff,
2968*7e6ad469SVishal Kulkarni 			struct cudbg_error *cudbg_err)
2969*7e6ad469SVishal Kulkarni {
2970*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
2971*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
2972*7e6ad469SVishal Kulkarni 	struct ireg_buf *up_cim;
2973*7e6ad469SVishal Kulkarni 	u32 size;
2974*7e6ad469SVishal Kulkarni 	int i, rc, n;
2975*7e6ad469SVishal Kulkarni 
2976*7e6ad469SVishal Kulkarni 	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
2977*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * n;
2978*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
2979*7e6ad469SVishal Kulkarni 
2980*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2981*7e6ad469SVishal Kulkarni 	if (rc)
2982*7e6ad469SVishal Kulkarni 		goto err;
2983*7e6ad469SVishal Kulkarni 
2984*7e6ad469SVishal Kulkarni 	up_cim = (struct ireg_buf *)scratch_buff.data;
2985*7e6ad469SVishal Kulkarni 
2986*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
2987*7e6ad469SVishal Kulkarni 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2988*7e6ad469SVishal Kulkarni 		u32 *buff = up_cim->outbuf;
2989*7e6ad469SVishal Kulkarni 
2990*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
2991*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2992*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2993*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_local_offset =
2994*7e6ad469SVishal Kulkarni 						t5_up_cim_reg_array[i][2];
2995*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_offset_range =
2996*7e6ad469SVishal Kulkarni 						t5_up_cim_reg_array[i][3];
2997*7e6ad469SVishal Kulkarni 		} else if (is_t6(padap->params.chip)) {
2998*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2999*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3000*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_local_offset =
3001*7e6ad469SVishal Kulkarni 						t6_up_cim_reg_array[i][2];
3002*7e6ad469SVishal Kulkarni 			up_cim_reg->ireg_offset_range =
3003*7e6ad469SVishal Kulkarni 						t6_up_cim_reg_array[i][3];
3004*7e6ad469SVishal Kulkarni 		}
3005*7e6ad469SVishal Kulkarni 
3006*7e6ad469SVishal Kulkarni 		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3007*7e6ad469SVishal Kulkarni 
3008*7e6ad469SVishal Kulkarni 		up_cim++;
3009*7e6ad469SVishal Kulkarni 	}
3010*7e6ad469SVishal Kulkarni 
3011*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3012*7e6ad469SVishal Kulkarni 	if (rc)
3013*7e6ad469SVishal Kulkarni 		goto err1;
3014*7e6ad469SVishal Kulkarni 
3015*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3016*7e6ad469SVishal Kulkarni 
3017*7e6ad469SVishal Kulkarni err1:
3018*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3019*7e6ad469SVishal Kulkarni err:
3020*7e6ad469SVishal Kulkarni 	return rc;
3021*7e6ad469SVishal Kulkarni }
3022*7e6ad469SVishal Kulkarni 
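/*
 * Mailbox command log collection is not wired up yet; the body below is
 * compiled out under "notyet" and the entity currently reports failure.
 */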
3023*7e6ad469SVishal Kulkarni static int
3024*7e6ad469SVishal Kulkarni collect_mbox_log(struct cudbg_init *pdbg_init,
3025*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
3026*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
3027*7e6ad469SVishal Kulkarni {
3028*7e6ad469SVishal Kulkarni #ifdef notyet
3029*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3030*7e6ad469SVishal Kulkarni 	struct cudbg_mbox_log *mboxlog = NULL;
3031*7e6ad469SVishal Kulkarni 	struct mbox_cmd_log *log = NULL;
3032*7e6ad469SVishal Kulkarni 	struct mbox_cmd *entry;
3033*7e6ad469SVishal Kulkarni 	u64 flit;
3034*7e6ad469SVishal Kulkarni 	u32 size;
3035*7e6ad469SVishal Kulkarni 	unsigned int entry_idx;
3036*7e6ad469SVishal Kulkarni 	int i, k, rc;
3037*7e6ad469SVishal Kulkarni 	u16 mbox_cmds;
3038*7e6ad469SVishal Kulkarni 
3039*7e6ad469SVishal Kulkarni 	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3040*7e6ad469SVishal Kulkarni 		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3041*7e6ad469SVishal Kulkarni 			mboxlog_param.log;
3042*7e6ad469SVishal Kulkarni 		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3043*7e6ad469SVishal Kulkarni 				mboxlog_param.mbox_cmds;
3044*7e6ad469SVishal Kulkarni 	} else {
3045*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
3046*7e6ad469SVishal Kulkarni 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
3047*7e6ad469SVishal Kulkarni 					 "Mbox log is not requested\n");
3048*7e6ad469SVishal Kulkarni 		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3049*7e6ad469SVishal Kulkarni 	}
3050*7e6ad469SVishal Kulkarni 
3051*7e6ad469SVishal Kulkarni 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3052*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3053*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3054*7e6ad469SVishal Kulkarni 	if (rc)
3055*7e6ad469SVishal Kulkarni 		goto err;
3056*7e6ad469SVishal Kulkarni 
3057*7e6ad469SVishal Kulkarni 	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3058*7e6ad469SVishal Kulkarni 
3059*7e6ad469SVishal Kulkarni 	for (k = 0; k < mbox_cmds; k++) {
3060*7e6ad469SVishal Kulkarni 		entry_idx = log->cursor + k;
3061*7e6ad469SVishal Kulkarni 		if (entry_idx >= log->size)
3062*7e6ad469SVishal Kulkarni 			entry_idx -= log->size;
3063*7e6ad469SVishal Kulkarni 		entry = mbox_cmd_log_entry(log, entry_idx);
3064*7e6ad469SVishal Kulkarni 
3065*7e6ad469SVishal Kulkarni 		/* skip over unused entries */
3066*7e6ad469SVishal Kulkarni 		if (entry->timestamp == 0)
3067*7e6ad469SVishal Kulkarni 			continue;
3068*7e6ad469SVishal Kulkarni 
3069*7e6ad469SVishal Kulkarni 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3070*7e6ad469SVishal Kulkarni 
3071*7e6ad469SVishal Kulkarni 		for (i = 0; i < MBOX_LEN / 8; i++) {
3072*7e6ad469SVishal Kulkarni 			flit = entry->cmd[i];
3073*7e6ad469SVishal Kulkarni 			mboxlog->hi[i] = (u32)(flit >> 32);
3074*7e6ad469SVishal Kulkarni 			mboxlog->lo[i] = (u32)flit;
3075*7e6ad469SVishal Kulkarni 		}
3076*7e6ad469SVishal Kulkarni 
3077*7e6ad469SVishal Kulkarni 		mboxlog++;
3078*7e6ad469SVishal Kulkarni 	}
3079*7e6ad469SVishal Kulkarni 
3080*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3081*7e6ad469SVishal Kulkarni 	if (rc)
3082*7e6ad469SVishal Kulkarni 		goto err1;
3083*7e6ad469SVishal Kulkarni 
3084*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3085*7e6ad469SVishal Kulkarni 
3086*7e6ad469SVishal Kulkarni err1:
3087*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3088*7e6ad469SVishal Kulkarni err:
3089*7e6ad469SVishal Kulkarni 	return rc;
3090*7e6ad469SVishal Kulkarni #endif
3091*7e6ad469SVishal Kulkarni 	return (-1);
3092*7e6ad469SVishal Kulkarni }
3093*7e6ad469SVishal Kulkarni 
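/*
 * Collect the CIM PBT tables (dynamic entries, static entries, LRF
 * entries and data entries) by reading them through the CIM host-access
 * interface.
 */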
3094*7e6ad469SVishal Kulkarni static int
3095*7e6ad469SVishal Kulkarni collect_pbt_tables(struct cudbg_init *pdbg_init,
3096*7e6ad469SVishal Kulkarni 		   struct cudbg_buffer *dbg_buff,
3097*7e6ad469SVishal Kulkarni 		   struct cudbg_error *cudbg_err)
3098*7e6ad469SVishal Kulkarni {
3099*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3100*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3101*7e6ad469SVishal Kulkarni 	struct cudbg_pbt_tables *pbt = NULL;
3102*7e6ad469SVishal Kulkarni 	u32 size;
3103*7e6ad469SVishal Kulkarni 	u32 addr;
3104*7e6ad469SVishal Kulkarni 	int i, rc;
3105*7e6ad469SVishal Kulkarni 
3106*7e6ad469SVishal Kulkarni 	size = sizeof(struct cudbg_pbt_tables);
3107*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3108*7e6ad469SVishal Kulkarni 
3109*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3110*7e6ad469SVishal Kulkarni 	if (rc)
3111*7e6ad469SVishal Kulkarni 		goto err;
3112*7e6ad469SVishal Kulkarni 
3113*7e6ad469SVishal Kulkarni 	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3114*7e6ad469SVishal Kulkarni 
3115*7e6ad469SVishal Kulkarni 	/* PBT dynamic entries */
3116*7e6ad469SVishal Kulkarni 	addr = CUDBG_CHAC_PBT_ADDR;
3117*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3118*7e6ad469SVishal Kulkarni 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3119*7e6ad469SVishal Kulkarni 		if (rc) {
3120*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
3121*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
3122*7e6ad469SVishal Kulkarni 						 "BUSY timeout reading "
3123*7e6ad469SVishal Kulkarni 					 "CIM_HOST_ACC_CTRL\n");
3124*7e6ad469SVishal Kulkarni 			goto err1;
3125*7e6ad469SVishal Kulkarni 		}
3126*7e6ad469SVishal Kulkarni 	}
3127*7e6ad469SVishal Kulkarni 
3128*7e6ad469SVishal Kulkarni 	/* PBT static entries */
3129*7e6ad469SVishal Kulkarni 
3130*7e6ad469SVishal Kulkarni 	/* static entries start when bit 6 is set */
3131*7e6ad469SVishal Kulkarni 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3132*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3133*7e6ad469SVishal Kulkarni 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3134*7e6ad469SVishal Kulkarni 		if (rc) {
3135*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
3136*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
3137*7e6ad469SVishal Kulkarni 						 "BUSY timeout reading "
3138*7e6ad469SVishal Kulkarni 					 "CIM_HOST_ACC_CTRL\n");
3139*7e6ad469SVishal Kulkarni 			goto err1;
3140*7e6ad469SVishal Kulkarni 		}
3141*7e6ad469SVishal Kulkarni 	}
3142*7e6ad469SVishal Kulkarni 
3143*7e6ad469SVishal Kulkarni 	/* LRF entries */
3144*7e6ad469SVishal Kulkarni 	addr = CUDBG_CHAC_PBT_LRF;
3145*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3146*7e6ad469SVishal Kulkarni 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3147*7e6ad469SVishal Kulkarni 		if (rc) {
3148*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
3149*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
3150*7e6ad469SVishal Kulkarni 						 "BUSY timeout reading "
3151*7e6ad469SVishal Kulkarni 					 "CIM_HOST_ACC_CTRL\n");
3152*7e6ad469SVishal Kulkarni 			goto err1;
3153*7e6ad469SVishal Kulkarni 		}
3154*7e6ad469SVishal Kulkarni 	}
3155*7e6ad469SVishal Kulkarni 
3156*7e6ad469SVishal Kulkarni 	/* PBT data entries */
3157*7e6ad469SVishal Kulkarni 	addr = CUDBG_CHAC_PBT_DATA;
3158*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3159*7e6ad469SVishal Kulkarni 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3160*7e6ad469SVishal Kulkarni 		if (rc) {
3161*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
3162*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
3163*7e6ad469SVishal Kulkarni 						 "BUSY timeout reading "
3164*7e6ad469SVishal Kulkarni 					 "CIM_HOST_ACC_CTRL\n");
3165*7e6ad469SVishal Kulkarni 			goto err1;
3166*7e6ad469SVishal Kulkarni 		}
3167*7e6ad469SVishal Kulkarni 	}
3168*7e6ad469SVishal Kulkarni 
3169*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3170*7e6ad469SVishal Kulkarni 	if (rc)
3171*7e6ad469SVishal Kulkarni 		goto err1;
3172*7e6ad469SVishal Kulkarni 
3173*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3174*7e6ad469SVishal Kulkarni 
3175*7e6ad469SVishal Kulkarni err1:
3176*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3177*7e6ad469SVishal Kulkarni err:
3178*7e6ad469SVishal Kulkarni 	return rc;
3179*7e6ad469SVishal Kulkarni }
3180*7e6ad469SVishal Kulkarni 
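/*
 * Collect the PM_RX and PM_TX indirect register windows listed in
 * t5_pm_rx_array and t5_pm_tx_array, one ireg_buf record per window,
 * and compress the result into the dump.
 */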
3181*7e6ad469SVishal Kulkarni static int
3182*7e6ad469SVishal Kulkarni collect_pm_indirect(struct cudbg_init *pdbg_init,
3183*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
3184*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
3185*7e6ad469SVishal Kulkarni {
3186*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3187*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3188*7e6ad469SVishal Kulkarni 	struct ireg_buf *ch_pm;
3189*7e6ad469SVishal Kulkarni 	u32 size;
3190*7e6ad469SVishal Kulkarni 	int i, rc, n;
3191*7e6ad469SVishal Kulkarni 
3192*7e6ad469SVishal Kulkarni 	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3193*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * n * 2;
3194*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3195*7e6ad469SVishal Kulkarni 
3196*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3197*7e6ad469SVishal Kulkarni 	if (rc)
3198*7e6ad469SVishal Kulkarni 		goto err;
3199*7e6ad469SVishal Kulkarni 
3200*7e6ad469SVishal Kulkarni 	ch_pm = (struct ireg_buf *)scratch_buff.data;
3201*7e6ad469SVishal Kulkarni 
3202*7e6ad469SVishal Kulkarni 	/* PM_RX */
3203*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3204*7e6ad469SVishal Kulkarni 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3205*7e6ad469SVishal Kulkarni 		u32 *buff = ch_pm->outbuf;
3206*7e6ad469SVishal Kulkarni 
3207*7e6ad469SVishal Kulkarni 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3208*7e6ad469SVishal Kulkarni 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
3209*7e6ad469SVishal Kulkarni 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3210*7e6ad469SVishal Kulkarni 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3211*7e6ad469SVishal Kulkarni 
3212*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap,
3213*7e6ad469SVishal Kulkarni 				pm_pio->ireg_addr,
3214*7e6ad469SVishal Kulkarni 				pm_pio->ireg_data,
3215*7e6ad469SVishal Kulkarni 				buff,
3216*7e6ad469SVishal Kulkarni 				pm_pio->ireg_offset_range,
3217*7e6ad469SVishal Kulkarni 				pm_pio->ireg_local_offset);
3218*7e6ad469SVishal Kulkarni 
3219*7e6ad469SVishal Kulkarni 		ch_pm++;
3220*7e6ad469SVishal Kulkarni 	}
3221*7e6ad469SVishal Kulkarni 
3222*7e6ad469SVishal Kulkarni 	/* PM_Tx */
3223*7e6ad469SVishal Kulkarni 	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3224*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3225*7e6ad469SVishal Kulkarni 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3226*7e6ad469SVishal Kulkarni 		u32 *buff = ch_pm->outbuf;
3227*7e6ad469SVishal Kulkarni 
3228*7e6ad469SVishal Kulkarni 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3229*7e6ad469SVishal Kulkarni 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
3230*7e6ad469SVishal Kulkarni 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3231*7e6ad469SVishal Kulkarni 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3232*7e6ad469SVishal Kulkarni 
3233*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap,
3234*7e6ad469SVishal Kulkarni 				pm_pio->ireg_addr,
3235*7e6ad469SVishal Kulkarni 				pm_pio->ireg_data,
3236*7e6ad469SVishal Kulkarni 				buff,
3237*7e6ad469SVishal Kulkarni 				pm_pio->ireg_offset_range,
3238*7e6ad469SVishal Kulkarni 				pm_pio->ireg_local_offset);
3239*7e6ad469SVishal Kulkarni 
3240*7e6ad469SVishal Kulkarni 		ch_pm++;
3241*7e6ad469SVishal Kulkarni 	}
3242*7e6ad469SVishal Kulkarni 
3243*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3244*7e6ad469SVishal Kulkarni 	if (rc)
3245*7e6ad469SVishal Kulkarni 		goto err1;
3246*7e6ad469SVishal Kulkarni 
3247*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3248*7e6ad469SVishal Kulkarni 
3249*7e6ad469SVishal Kulkarni err1:
3250*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3251*7e6ad469SVishal Kulkarni err:
3252*7e6ad469SVishal Kulkarni 	return rc;
3254*7e6ad469SVishal Kulkarni }
3255*7e6ad469SVishal Kulkarni 
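/*
 * Collect the TID/filter/server region layout.  Most ranges come from
 * firmware via t4_query_params() (retried on mailbox 4 if the default
 * mailbox lacks permission); the rest are read from LE_DB registers.
 */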
3256*7e6ad469SVishal Kulkarni static int
3257*7e6ad469SVishal Kulkarni collect_tid(struct cudbg_init *pdbg_init,
3258*7e6ad469SVishal Kulkarni 	    struct cudbg_buffer *dbg_buff,
3259*7e6ad469SVishal Kulkarni 	    struct cudbg_error *cudbg_err)
3260*7e6ad469SVishal Kulkarni {
3262*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3263*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3264*7e6ad469SVishal Kulkarni 	struct tid_info_region *tid;
3265*7e6ad469SVishal Kulkarni 	struct tid_info_region_rev1 *tid1;
3266*7e6ad469SVishal Kulkarni 	u32 para[7], val[7];
3267*7e6ad469SVishal Kulkarni 	u32 mbox, pf;
3268*7e6ad469SVishal Kulkarni 	int rc;
3269*7e6ad469SVishal Kulkarni 
3270*7e6ad469SVishal Kulkarni 	scratch_buff.size = sizeof(struct tid_info_region_rev1);
3271*7e6ad469SVishal Kulkarni 
3272*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3273*7e6ad469SVishal Kulkarni 	if (rc)
3274*7e6ad469SVishal Kulkarni 		goto err;
3275*7e6ad469SVishal Kulkarni 
3276*7e6ad469SVishal Kulkarni #define FW_PARAM_DEV_A(param) \
3277*7e6ad469SVishal Kulkarni 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3278*7e6ad469SVishal Kulkarni 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3279*7e6ad469SVishal Kulkarni #define FW_PARAM_PFVF_A(param) \
3280*7e6ad469SVishal Kulkarni 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3281*7e6ad469SVishal Kulkarni 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3282*7e6ad469SVishal Kulkarni 	 V_FW_PARAMS_PARAM_Y(0) | \
3283*7e6ad469SVishal Kulkarni 	 V_FW_PARAMS_PARAM_Z(0))
3284*7e6ad469SVishal Kulkarni #define MAX_ATIDS_A 8192U
3285*7e6ad469SVishal Kulkarni 
3286*7e6ad469SVishal Kulkarni 	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3287*7e6ad469SVishal Kulkarni 	tid = &(tid1->tid);
3288*7e6ad469SVishal Kulkarni 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3289*7e6ad469SVishal Kulkarni 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3290*7e6ad469SVishal Kulkarni 	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3291*7e6ad469SVishal Kulkarni 			     sizeof(struct cudbg_ver_hdr);
3292*7e6ad469SVishal Kulkarni 
3293*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip)) {
3294*7e6ad469SVishal Kulkarni 		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3295*7e6ad469SVishal Kulkarni 		tid1->tid_start = 0;
3296*7e6ad469SVishal Kulkarni 	} else if (is_t6(padap->params.chip)) {
3297*7e6ad469SVishal Kulkarni 		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3298*7e6ad469SVishal Kulkarni 		tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3299*7e6ad469SVishal Kulkarni 	}
3300*7e6ad469SVishal Kulkarni 
3301*7e6ad469SVishal Kulkarni 	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3302*7e6ad469SVishal Kulkarni 
3303*7e6ad469SVishal Kulkarni 	para[0] = FW_PARAM_PFVF_A(FILTER_START);
3304*7e6ad469SVishal Kulkarni 	para[1] = FW_PARAM_PFVF_A(FILTER_END);
3305*7e6ad469SVishal Kulkarni 	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3306*7e6ad469SVishal Kulkarni 	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3307*7e6ad469SVishal Kulkarni 	para[4] = FW_PARAM_DEV_A(NTID);
3308*7e6ad469SVishal Kulkarni 	para[5] = FW_PARAM_PFVF_A(SERVER_START);
3309*7e6ad469SVishal Kulkarni 	para[6] = FW_PARAM_PFVF_A(SERVER_END);
3310*7e6ad469SVishal Kulkarni 
3311*7e6ad469SVishal Kulkarni 	rc = begin_synchronized_op(padap->port[0], 1, 1);
3312*7e6ad469SVishal Kulkarni 	if (rc)
3313*7e6ad469SVishal Kulkarni 		goto err;
3314*7e6ad469SVishal Kulkarni 	mbox = padap->mbox;
3315*7e6ad469SVishal Kulkarni 	pf = padap->pf;
3316*7e6ad469SVishal Kulkarni 	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3317*7e6ad469SVishal Kulkarni 	if (rc < 0) {
3318*7e6ad469SVishal Kulkarni 		if (rc == -FW_EPERM) {
3319*7e6ad469SVishal Kulkarni 			/* It looks like we don't have permission to use
3320*7e6ad469SVishal Kulkarni 			 * padap->mbox.
3321*7e6ad469SVishal Kulkarni 			 *
3322*7e6ad469SVishal Kulkarni 			 * Try mbox 4.  If it works, we'll continue to
3323*7e6ad469SVishal Kulkarni 			 * collect the rest of tid info from mbox 4.
3324*7e6ad469SVishal Kulkarni 			 * Else, quit trying to collect tid info.
3325*7e6ad469SVishal Kulkarni 			 */
3326*7e6ad469SVishal Kulkarni 			mbox = 4;
3327*7e6ad469SVishal Kulkarni 			pf = 4;
3328*7e6ad469SVishal Kulkarni 			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3329*7e6ad469SVishal Kulkarni 			if (rc < 0) {
3330*7e6ad469SVishal Kulkarni 				cudbg_err->sys_err = rc;
3331*7e6ad469SVishal Kulkarni 				goto err1;
3332*7e6ad469SVishal Kulkarni 			}
3333*7e6ad469SVishal Kulkarni 		} else {
3334*7e6ad469SVishal Kulkarni 			cudbg_err->sys_err = rc;
3335*7e6ad469SVishal Kulkarni 			goto err1;
3336*7e6ad469SVishal Kulkarni 		}
3337*7e6ad469SVishal Kulkarni 	}
3338*7e6ad469SVishal Kulkarni 
3339*7e6ad469SVishal Kulkarni 	tid->ftid_base = val[0];
3340*7e6ad469SVishal Kulkarni 	tid->nftids = val[1] - val[0] + 1;
3341*7e6ad469SVishal Kulkarni 	/* active filter region */
3342*7e6ad469SVishal Kulkarni 	if (val[2] != val[3]) {
3343*7e6ad469SVishal Kulkarni #ifdef notyet
3344*7e6ad469SVishal Kulkarni 		tid->flags |= FW_OFLD_CONN;
3345*7e6ad469SVishal Kulkarni #endif
3346*7e6ad469SVishal Kulkarni 		tid->aftid_base = val[2];
3347*7e6ad469SVishal Kulkarni 		tid->aftid_end = val[3];
3348*7e6ad469SVishal Kulkarni 	}
3349*7e6ad469SVishal Kulkarni 	tid->ntids = val[4];
3350*7e6ad469SVishal Kulkarni 	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3351*7e6ad469SVishal Kulkarni 	tid->stid_base = val[5];
3352*7e6ad469SVishal Kulkarni 	tid->nstids = val[6] - val[5] + 1;
3353*7e6ad469SVishal Kulkarni 
3354*7e6ad469SVishal Kulkarni 	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
3355*7e6ad469SVishal Kulkarni 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3356*7e6ad469SVishal Kulkarni 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3357*7e6ad469SVishal Kulkarni 		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3358*7e6ad469SVishal Kulkarni 		if (rc < 0) {
3359*7e6ad469SVishal Kulkarni 			cudbg_err->sys_err = rc;
3360*7e6ad469SVishal Kulkarni 			goto err1;
3361*7e6ad469SVishal Kulkarni 		}
3362*7e6ad469SVishal Kulkarni 
3363*7e6ad469SVishal Kulkarni 		tid->hpftid_base = val[0];
3364*7e6ad469SVishal Kulkarni 		tid->nhpftids = val[1] - val[0] + 1;
3365*7e6ad469SVishal Kulkarni 	}
3366*7e6ad469SVishal Kulkarni 
3367*7e6ad469SVishal Kulkarni 	if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
3368*7e6ad469SVishal Kulkarni 		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3369*7e6ad469SVishal Kulkarni 		tid->hash_base /= 4;
3370*7e6ad469SVishal Kulkarni 	} else
3371*7e6ad469SVishal Kulkarni 		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3372*7e6ad469SVishal Kulkarni 
3373*7e6ad469SVishal Kulkarni 	/* UO context range */
3374*7e6ad469SVishal Kulkarni 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3375*7e6ad469SVishal Kulkarni 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3376*7e6ad469SVishal Kulkarni 
3377*7e6ad469SVishal Kulkarni 	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3378*7e6ad469SVishal Kulkarni 	if (rc < 0) {
3379*7e6ad469SVishal Kulkarni 		cudbg_err->sys_err = rc;
3380*7e6ad469SVishal Kulkarni 		goto err1;
3381*7e6ad469SVishal Kulkarni 	}
3382*7e6ad469SVishal Kulkarni 
3383*7e6ad469SVishal Kulkarni 	if (val[0] != val[1]) {
3384*7e6ad469SVishal Kulkarni 		tid->uotid_base = val[0];
3385*7e6ad469SVishal Kulkarni 		tid->nuotids = val[1] - val[0] + 1;
3386*7e6ad469SVishal Kulkarni 	}
3387*7e6ad469SVishal Kulkarni 	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3388*7e6ad469SVishal Kulkarni 	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3389*7e6ad469SVishal Kulkarni 
3390*7e6ad469SVishal Kulkarni #undef FW_PARAM_PFVF_A
3391*7e6ad469SVishal Kulkarni #undef FW_PARAM_DEV_A
3392*7e6ad469SVishal Kulkarni #undef MAX_ATIDS_A
3393*7e6ad469SVishal Kulkarni 
3394*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3395*7e6ad469SVishal Kulkarni 	if (rc)
3396*7e6ad469SVishal Kulkarni 		goto err1;
3397*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3398*7e6ad469SVishal Kulkarni 
3399*7e6ad469SVishal Kulkarni err1:
3400*7e6ad469SVishal Kulkarni 	end_synchronized_op(padap->port[0], 1);
3401*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3402*7e6ad469SVishal Kulkarni err:
3403*7e6ad469SVishal Kulkarni 	return rc;
3404*7e6ad469SVishal Kulkarni }
3405*7e6ad469SVishal Kulkarni 
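/*
 * Collect the per-channel NIC and offload Tx rates reported by
 * t4_get_chan_txrate().
 */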
3406*7e6ad469SVishal Kulkarni static int
3407*7e6ad469SVishal Kulkarni collect_tx_rate(struct cudbg_init *pdbg_init,
3408*7e6ad469SVishal Kulkarni 		struct cudbg_buffer *dbg_buff,
3409*7e6ad469SVishal Kulkarni 		struct cudbg_error *cudbg_err)
3410*7e6ad469SVishal Kulkarni {
3411*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3412*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3413*7e6ad469SVishal Kulkarni 	struct tx_rate *tx_rate;
3414*7e6ad469SVishal Kulkarni 	u32 size;
3415*7e6ad469SVishal Kulkarni 	int rc;
3416*7e6ad469SVishal Kulkarni 
3417*7e6ad469SVishal Kulkarni 	size = sizeof(struct tx_rate);
3418*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3419*7e6ad469SVishal Kulkarni 
3420*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3421*7e6ad469SVishal Kulkarni 	if (rc)
3422*7e6ad469SVishal Kulkarni 		goto err;
3423*7e6ad469SVishal Kulkarni 
3424*7e6ad469SVishal Kulkarni 	tx_rate = (struct tx_rate *)scratch_buff.data;
3425*7e6ad469SVishal Kulkarni 	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3426*7e6ad469SVishal Kulkarni 	tx_rate->nchan = padap->params.arch.nchan;
3427*7e6ad469SVishal Kulkarni 
3428*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3429*7e6ad469SVishal Kulkarni 	if (rc)
3430*7e6ad469SVishal Kulkarni 		goto err1;
3431*7e6ad469SVishal Kulkarni 
3432*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3433*7e6ad469SVishal Kulkarni 
3434*7e6ad469SVishal Kulkarni err1:
3435*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3436*7e6ad469SVishal Kulkarni err:
3437*7e6ad469SVishal Kulkarni 	return rc;
3438*7e6ad469SVishal Kulkarni }
3439*7e6ad469SVishal Kulkarni 
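/*
 * Convert a TCAM X/Y pair into an Ethernet address and mask: the mask is
 * x | y and the address is the low 48 bits of y in network byte order.
 */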
3440*7e6ad469SVishal Kulkarni static inline void
3441*7e6ad469SVishal Kulkarni cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3442*7e6ad469SVishal Kulkarni {
3443*7e6ad469SVishal Kulkarni 	*mask = x | y;
3444*7e6ad469SVishal Kulkarni 	y = (__force u64)cpu_to_be64(y);
3445*7e6ad469SVishal Kulkarni 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
3446*7e6ad469SVishal Kulkarni }
3447*7e6ad469SVishal Kulkarni 
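/*
 * Backdoor read of the MPS replication map straight from the
 * MPS_VF_RPLCT_MAP registers, used when the FW_LDST mailbox command fails.
 */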
3448*7e6ad469SVishal Kulkarni static void
3449*7e6ad469SVishal Kulkarni mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3450*7e6ad469SVishal Kulkarni {
3451*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip)) {
3452*7e6ad469SVishal Kulkarni 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3453*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP3));
3454*7e6ad469SVishal Kulkarni 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3455*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP2));
3456*7e6ad469SVishal Kulkarni 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3457*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP1));
3458*7e6ad469SVishal Kulkarni 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3459*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP0));
3460*7e6ad469SVishal Kulkarni 	} else {
3461*7e6ad469SVishal Kulkarni 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3462*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP7));
3463*7e6ad469SVishal Kulkarni 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3464*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP6));
3465*7e6ad469SVishal Kulkarni 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3466*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP5));
3467*7e6ad469SVishal Kulkarni 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3468*7e6ad469SVishal Kulkarni 							  A_MPS_VF_RPLCT_MAP4));
3469*7e6ad469SVishal Kulkarni 	}
3470*7e6ad469SVishal Kulkarni 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3471*7e6ad469SVishal Kulkarni 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3472*7e6ad469SVishal Kulkarni 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3473*7e6ad469SVishal Kulkarni 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3474*7e6ad469SVishal Kulkarni }
3475*7e6ad469SVishal Kulkarni 
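/*
 * Collect every MPS TCAM entry.  T6 entries are read through
 * MPS_CLS_TCAM_DATA2_CTL with separate Y and X passes; T5 entries come
 * from MPS_CLS_TCAM_{Y,X}_L.  Entries with replication enabled also record
 * their replication map, via FW_LDST or the register backdoor above.
 */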
3476*7e6ad469SVishal Kulkarni static int
3477*7e6ad469SVishal Kulkarni collect_mps_tcam(struct cudbg_init *pdbg_init,
3478*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
3479*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
3480*7e6ad469SVishal Kulkarni {
3481*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3482*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3483*7e6ad469SVishal Kulkarni 	struct cudbg_mps_tcam *tcam = NULL;
3484*7e6ad469SVishal Kulkarni 	u32 size = 0, i, n, total_size = 0;
3485*7e6ad469SVishal Kulkarni 	u32 ctl, data2;
3486*7e6ad469SVishal Kulkarni 	u64 tcamy, tcamx, val;
3487*7e6ad469SVishal Kulkarni 	int rc;
3488*7e6ad469SVishal Kulkarni 
3490*7e6ad469SVishal Kulkarni 	n = padap->params.arch.mps_tcam_size;
3491*7e6ad469SVishal Kulkarni 	size = sizeof(struct cudbg_mps_tcam) * n;
3492*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3493*7e6ad469SVishal Kulkarni 
3494*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3495*7e6ad469SVishal Kulkarni 	if (rc)
3496*7e6ad469SVishal Kulkarni 		goto err;
3497*7e6ad469SVishal Kulkarni 	memset(scratch_buff.data, 0, size);
3498*7e6ad469SVishal Kulkarni 
3499*7e6ad469SVishal Kulkarni 	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3500*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3501*7e6ad469SVishal Kulkarni 		if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
3502*7e6ad469SVishal Kulkarni 			/* CtlReqID   - 1: use Host Driver Requester ID
3503*7e6ad469SVishal Kulkarni 			 * CtlCmdType - 0: Read, 1: Write
3504*7e6ad469SVishal Kulkarni 			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
3505*7e6ad469SVishal Kulkarni 			 * CtlXYBitSel- 0: Y bit, 1: X bit
3506*7e6ad469SVishal Kulkarni 			 */
3507*7e6ad469SVishal Kulkarni 
3508*7e6ad469SVishal Kulkarni 			/* Read tcamy */
3509*7e6ad469SVishal Kulkarni 			ctl = (V_CTLREQID(1) |
3510*7e6ad469SVishal Kulkarni 			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3511*7e6ad469SVishal Kulkarni 			if (i < 256)
3512*7e6ad469SVishal Kulkarni 				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3513*7e6ad469SVishal Kulkarni 			else
3514*7e6ad469SVishal Kulkarni 				ctl |= V_CTLTCAMINDEX(i - 256) |
3515*7e6ad469SVishal Kulkarni 				       V_CTLTCAMSEL(1);
3516*7e6ad469SVishal Kulkarni 
3517*7e6ad469SVishal Kulkarni 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3518*7e6ad469SVishal Kulkarni 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3519*7e6ad469SVishal Kulkarni 			tcamy = G_DMACH(val) << 32;
3520*7e6ad469SVishal Kulkarni 			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3521*7e6ad469SVishal Kulkarni 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3522*7e6ad469SVishal Kulkarni 			tcam->lookup_type = G_DATALKPTYPE(data2);
3523*7e6ad469SVishal Kulkarni 
3524*7e6ad469SVishal Kulkarni 			/* 0 - Outer header, 1 - Inner header
3525*7e6ad469SVishal Kulkarni 			 * [71:48] bit locations are overloaded for
3526*7e6ad469SVishal Kulkarni 			 * outer vs. inner lookup types.
3527*7e6ad469SVishal Kulkarni 			 */
3528*7e6ad469SVishal Kulkarni 
3529*7e6ad469SVishal Kulkarni 			if (tcam->lookup_type &&
3530*7e6ad469SVishal Kulkarni 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3531*7e6ad469SVishal Kulkarni 				/* Inner header VNI */
3532*7e6ad469SVishal Kulkarni 				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3533*7e6ad469SVishal Kulkarni 					     (G_DATAVIDH1(data2) << 16) |
3534*7e6ad469SVishal Kulkarni 					     G_VIDL(val);
3535*7e6ad469SVishal Kulkarni 				tcam->dip_hit = data2 & F_DATADIPHIT;
3536*7e6ad469SVishal Kulkarni 			} else {
3537*7e6ad469SVishal Kulkarni 				tcam->vlan_vld = data2 & F_DATAVIDH2;
3538*7e6ad469SVishal Kulkarni 				tcam->ivlan = G_VIDL(val);
3539*7e6ad469SVishal Kulkarni 			}
3540*7e6ad469SVishal Kulkarni 
3541*7e6ad469SVishal Kulkarni 			tcam->port_num = G_DATAPORTNUM(data2);
3542*7e6ad469SVishal Kulkarni 
3543*7e6ad469SVishal Kulkarni 			/* Read tcamx. Change the control param */
3544*7e6ad469SVishal Kulkarni 			ctl |= V_CTLXYBITSEL(1);
3545*7e6ad469SVishal Kulkarni 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3546*7e6ad469SVishal Kulkarni 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3547*7e6ad469SVishal Kulkarni 			tcamx = G_DMACH(val) << 32;
3548*7e6ad469SVishal Kulkarni 			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3549*7e6ad469SVishal Kulkarni 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3550*7e6ad469SVishal Kulkarni 			if (tcam->lookup_type &&
3551*7e6ad469SVishal Kulkarni 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3552*7e6ad469SVishal Kulkarni 				/* Inner header VNI mask */
3553*7e6ad469SVishal Kulkarni 				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3554*7e6ad469SVishal Kulkarni 					     (G_DATAVIDH1(data2) << 16) |
3555*7e6ad469SVishal Kulkarni 					     G_VIDL(val);
3556*7e6ad469SVishal Kulkarni 			}
3557*7e6ad469SVishal Kulkarni 		} else {
3558*7e6ad469SVishal Kulkarni 			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3559*7e6ad469SVishal Kulkarni 			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3560*7e6ad469SVishal Kulkarni 		}
3561*7e6ad469SVishal Kulkarni 
3562*7e6ad469SVishal Kulkarni 		if (tcamx & tcamy)
3563*7e6ad469SVishal Kulkarni 			continue;
3564*7e6ad469SVishal Kulkarni 
3565*7e6ad469SVishal Kulkarni 		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3566*7e6ad469SVishal Kulkarni 		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3567*7e6ad469SVishal Kulkarni 
3568*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip))
3569*7e6ad469SVishal Kulkarni 			tcam->repli = (tcam->cls_lo & F_REPLICATE);
3570*7e6ad469SVishal Kulkarni 		else if (is_t6(padap->params.chip))
3571*7e6ad469SVishal Kulkarni 			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3572*7e6ad469SVishal Kulkarni 
3573*7e6ad469SVishal Kulkarni 		if (tcam->repli) {
3574*7e6ad469SVishal Kulkarni 			struct fw_ldst_cmd ldst_cmd;
3575*7e6ad469SVishal Kulkarni 			struct fw_ldst_mps_rplc mps_rplc;
3576*7e6ad469SVishal Kulkarni 
3577*7e6ad469SVishal Kulkarni 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3578*7e6ad469SVishal Kulkarni 			ldst_cmd.op_to_addrspace =
3579*7e6ad469SVishal Kulkarni 				htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3580*7e6ad469SVishal Kulkarni 				      F_FW_CMD_REQUEST |
3581*7e6ad469SVishal Kulkarni 				      F_FW_CMD_READ |
3582*7e6ad469SVishal Kulkarni 				      V_FW_LDST_CMD_ADDRSPACE(
3583*7e6ad469SVishal Kulkarni 					      FW_LDST_ADDRSPC_MPS));
3584*7e6ad469SVishal Kulkarni 
3585*7e6ad469SVishal Kulkarni 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3586*7e6ad469SVishal Kulkarni 
3587*7e6ad469SVishal Kulkarni 			ldst_cmd.u.mps.rplc.fid_idx =
3588*7e6ad469SVishal Kulkarni 				htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3589*7e6ad469SVishal Kulkarni 				      V_FW_LDST_CMD_IDX(i));
3590*7e6ad469SVishal Kulkarni 
3591*7e6ad469SVishal Kulkarni 			rc = begin_synchronized_op(padap->port[0], 1, 1);
3592*7e6ad469SVishal Kulkarni 			if (rc == 0) {
3593*7e6ad469SVishal Kulkarni 				rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3594*7e6ad469SVishal Kulkarni 						sizeof(ldst_cmd), &ldst_cmd);
3595*7e6ad469SVishal Kulkarni 				end_synchronized_op(padap->port[0], 1);
3596*7e6ad469SVishal Kulkarni 			}
3597*7e6ad469SVishal Kulkarni 
3598*7e6ad469SVishal Kulkarni 			if (rc)
3599*7e6ad469SVishal Kulkarni 				mps_rpl_backdoor(padap, &mps_rplc);
3600*7e6ad469SVishal Kulkarni 			else
3601*7e6ad469SVishal Kulkarni 				mps_rplc = ldst_cmd.u.mps.rplc;
3602*7e6ad469SVishal Kulkarni 
3603*7e6ad469SVishal Kulkarni 			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3604*7e6ad469SVishal Kulkarni 			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3605*7e6ad469SVishal Kulkarni 			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3606*7e6ad469SVishal Kulkarni 			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3607*7e6ad469SVishal Kulkarni 			if (padap->params.arch.mps_rplc_size >
3608*7e6ad469SVishal Kulkarni 					CUDBG_MAX_RPLC_SIZE) {
3609*7e6ad469SVishal Kulkarni 				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3610*7e6ad469SVishal Kulkarni 				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3611*7e6ad469SVishal Kulkarni 				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3612*7e6ad469SVishal Kulkarni 				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3613*7e6ad469SVishal Kulkarni 			}
3614*7e6ad469SVishal Kulkarni 		}
3615*7e6ad469SVishal Kulkarni 		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3616*7e6ad469SVishal Kulkarni 
3617*7e6ad469SVishal Kulkarni 		tcam->idx = i;
3618*7e6ad469SVishal Kulkarni 		tcam->rplc_size = padap->params.arch.mps_rplc_size;
3619*7e6ad469SVishal Kulkarni 
3620*7e6ad469SVishal Kulkarni 		total_size += sizeof(struct cudbg_mps_tcam);
3621*7e6ad469SVishal Kulkarni 
3622*7e6ad469SVishal Kulkarni 		tcam++;
3623*7e6ad469SVishal Kulkarni 	}
3624*7e6ad469SVishal Kulkarni 
3625*7e6ad469SVishal Kulkarni 	if (total_size == 0) {
3626*7e6ad469SVishal Kulkarni 		rc = CUDBG_SYSTEM_ERROR;
3627*7e6ad469SVishal Kulkarni 		goto err1;
3628*7e6ad469SVishal Kulkarni 	}
3629*7e6ad469SVishal Kulkarni 
3630*7e6ad469SVishal Kulkarni 	scratch_buff.size = total_size;
3631*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3632*7e6ad469SVishal Kulkarni 	if (rc)
3633*7e6ad469SVishal Kulkarni 		goto err1;
3634*7e6ad469SVishal Kulkarni 
3635*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3636*7e6ad469SVishal Kulkarni 
3637*7e6ad469SVishal Kulkarni err1:
3638*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3639*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3640*7e6ad469SVishal Kulkarni err:
3641*7e6ad469SVishal Kulkarni 	return rc;
3642*7e6ad469SVishal Kulkarni }
3643*7e6ad469SVishal Kulkarni 
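/*
 * Collect the PCIe config space ranges listed in t5_pcie_config_array,
 * read 4 bytes at a time with t4_hw_pci_read_cfg4().
 */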
3644*7e6ad469SVishal Kulkarni static int
3645*7e6ad469SVishal Kulkarni collect_pcie_config(struct cudbg_init *pdbg_init,
3646*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
3647*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
3648*7e6ad469SVishal Kulkarni {
3649*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3650*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3651*7e6ad469SVishal Kulkarni 	u32 size, *value, j;
3652*7e6ad469SVishal Kulkarni 	int i, rc, n;
3653*7e6ad469SVishal Kulkarni 
3654*7e6ad469SVishal Kulkarni 	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3655*7e6ad469SVishal Kulkarni 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3656*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3657*7e6ad469SVishal Kulkarni 
3658*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3659*7e6ad469SVishal Kulkarni 	if (rc)
3660*7e6ad469SVishal Kulkarni 		goto err;
3661*7e6ad469SVishal Kulkarni 
3662*7e6ad469SVishal Kulkarni 	value = (u32 *)scratch_buff.data;
3663*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3664*7e6ad469SVishal Kulkarni 		for (j = t5_pcie_config_array[i][0];
3665*7e6ad469SVishal Kulkarni 		     j <= t5_pcie_config_array[i][1]; j += 4) {
3666*7e6ad469SVishal Kulkarni 			t4_hw_pci_read_cfg4(padap, j, value++);
3667*7e6ad469SVishal Kulkarni 		}
3668*7e6ad469SVishal Kulkarni 	}
3669*7e6ad469SVishal Kulkarni 
3670*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3671*7e6ad469SVishal Kulkarni 	if (rc)
3672*7e6ad469SVishal Kulkarni 		goto err1;
3673*7e6ad469SVishal Kulkarni 
3674*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3675*7e6ad469SVishal Kulkarni 
3676*7e6ad469SVishal Kulkarni err1:
3677*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3678*7e6ad469SVishal Kulkarni err:
3679*7e6ad469SVishal Kulkarni 	return rc;
3680*7e6ad469SVishal Kulkarni }
3681*7e6ad469SVishal Kulkarni 
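/*
 * Read one TID through the LE DBGI interface: clear the request registers,
 * issue the command, poll DBGICMDBUSY with a bounded retry count, verify
 * the response status and copy out the response words.
 */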
3682*7e6ad469SVishal Kulkarni static int
3683*7e6ad469SVishal Kulkarni cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3684*7e6ad469SVishal Kulkarni 	       struct cudbg_tid_data *tid_data)
3685*7e6ad469SVishal Kulkarni {
3686*7e6ad469SVishal Kulkarni 	int i, cmd_retry = 8;
3687*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3688*7e6ad469SVishal Kulkarni 	u32 val;
3689*7e6ad469SVishal Kulkarni 
3690*7e6ad469SVishal Kulkarni 	/* Fill REQ_DATA regs with 0's */
3691*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3692*7e6ad469SVishal Kulkarni 		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3693*7e6ad469SVishal Kulkarni 
3694*7e6ad469SVishal Kulkarni 	/* Write DBIG command */
3695*7e6ad469SVishal Kulkarni 	/* Write DBGI command */
3696*7e6ad469SVishal Kulkarni 	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3697*7e6ad469SVishal Kulkarni 	tid_data->dbig_cmd = val;
3698*7e6ad469SVishal Kulkarni 
3699*7e6ad469SVishal Kulkarni 	val = 0;
3700*7e6ad469SVishal Kulkarni 	val |= 1 << S_DBGICMDSTRT;
3701*7e6ad469SVishal Kulkarni 	val |= 1;  /* LE mode */
3702*7e6ad469SVishal Kulkarni 	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3703*7e6ad469SVishal Kulkarni 	tid_data->dbig_conf = val;
3704*7e6ad469SVishal Kulkarni 
3705*7e6ad469SVishal Kulkarni 	/* Poll the DBGICMDBUSY bit */
3706*7e6ad469SVishal Kulkarni 	val = 1;
3707*7e6ad469SVishal Kulkarni 	while (val) {
3708*7e6ad469SVishal Kulkarni 		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3709*7e6ad469SVishal Kulkarni 		val = (val >> S_DBGICMDBUSY) & 1;
3710*7e6ad469SVishal Kulkarni 		cmd_retry--;
3711*7e6ad469SVishal Kulkarni 		if (!cmd_retry) {
3712*7e6ad469SVishal Kulkarni 			if (pdbg_init->verbose)
3713*7e6ad469SVishal Kulkarni 				pdbg_init->print(padap->dip, CE_NOTE,
3714*7e6ad469SVishal Kulkarni 						 "%s(): Timeout waiting for non-busy\n",
3715*7e6ad469SVishal Kulkarni 					 __func__);
3716*7e6ad469SVishal Kulkarni 			return CUDBG_SYSTEM_ERROR;
3717*7e6ad469SVishal Kulkarni 		}
3718*7e6ad469SVishal Kulkarni 	}
3719*7e6ad469SVishal Kulkarni 
3720*7e6ad469SVishal Kulkarni 	/* Check RESP status */
3721*7e6ad469SVishal Kulkarni 	val = 0;
3722*7e6ad469SVishal Kulkarni 	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3723*7e6ad469SVishal Kulkarni 	tid_data->dbig_rsp_stat = val;
3724*7e6ad469SVishal Kulkarni 	if (!(val & 1)) {
3725*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
3726*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
3727*7e6ad469SVishal Kulkarni 					 "%s(): DBGI command failed\n", __func__);
3728*7e6ad469SVishal Kulkarni 		return CUDBG_SYSTEM_ERROR;
3729*7e6ad469SVishal Kulkarni 	}
3730*7e6ad469SVishal Kulkarni 
3731*7e6ad469SVishal Kulkarni 	/* Read RESP data */
3732*7e6ad469SVishal Kulkarni 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3733*7e6ad469SVishal Kulkarni 		tid_data->data[i] = t4_read_reg(padap,
3734*7e6ad469SVishal Kulkarni 						A_LE_DB_DBGI_RSP_DATA +
3735*7e6ad469SVishal Kulkarni 						(i << 2));
3736*7e6ad469SVishal Kulkarni 
3737*7e6ad469SVishal Kulkarni 	tid_data->tid = tid;
3738*7e6ad469SVishal Kulkarni 
3739*7e6ad469SVishal Kulkarni 	return 0;
3740*7e6ad469SVishal Kulkarni }
3741*7e6ad469SVishal Kulkarni 
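/*
 * Collect the LE TCAM.  The region boundaries and maximum TID count are
 * derived from the LE_DB registers, then every TID is read with
 * cudbg_read_tid() and compressed one CUDBG_CHUNK_SIZE buffer at a time.
 */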
3742*7e6ad469SVishal Kulkarni static int
3743*7e6ad469SVishal Kulkarni collect_le_tcam(struct cudbg_init *pdbg_init,
3744*7e6ad469SVishal Kulkarni 		struct cudbg_buffer *dbg_buff,
3745*7e6ad469SVishal Kulkarni 		struct cudbg_error *cudbg_err)
3746*7e6ad469SVishal Kulkarni {
3747*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3748*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3749*7e6ad469SVishal Kulkarni 	struct cudbg_tcam tcam_region = {0};
3750*7e6ad469SVishal Kulkarni 	struct cudbg_tid_data *tid_data = NULL;
3751*7e6ad469SVishal Kulkarni 	u32 value, bytes = 0, bytes_left  = 0;
3752*7e6ad469SVishal Kulkarni 	u32 i;
3753*7e6ad469SVishal Kulkarni 	int rc, size;
3754*7e6ad469SVishal Kulkarni 
3755*7e6ad469SVishal Kulkarni 	/* Get the LE regions */
3756*7e6ad469SVishal Kulkarni 	/* Get hash base index */
3757*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3758*7e6ad469SVishal Kulkarni 	tcam_region.tid_hash_base = value;
3759*7e6ad469SVishal Kulkarni 
3760*7e6ad469SVishal Kulkarni 	/* Get routing table index */
3761*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3762*7e6ad469SVishal Kulkarni 	tcam_region.routing_start = value;
3763*7e6ad469SVishal Kulkarni 
3764*7e6ad469SVishal Kulkarni 	/* Get clip table index */
3765*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3766*7e6ad469SVishal Kulkarni 	tcam_region.clip_start = value;
3767*7e6ad469SVishal Kulkarni 
3768*7e6ad469SVishal Kulkarni 	/* Get filter table index */
3769*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3770*7e6ad469SVishal Kulkarni 	tcam_region.filter_start = value;
3771*7e6ad469SVishal Kulkarni 
3772*7e6ad469SVishal Kulkarni 	/* Get server table index */
3773*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3774*7e6ad469SVishal Kulkarni 	tcam_region.server_start = value;
3775*7e6ad469SVishal Kulkarni 
3776*7e6ad469SVishal Kulkarni 	/* Check whether hash is enabled and calculate the max tids */
3777*7e6ad469SVishal Kulkarni 	value = t4_read_reg(padap, A_LE_DB_CONFIG);
3778*7e6ad469SVishal Kulkarni 	if ((value >> S_HASHEN) & 1) {
3779*7e6ad469SVishal Kulkarni 		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3780*7e6ad469SVishal Kulkarni 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
3781*7e6ad469SVishal Kulkarni 			tcam_region.max_tid = (value & 0xFFFFF) +
3782*7e6ad469SVishal Kulkarni 					      tcam_region.tid_hash_base;
3783*7e6ad469SVishal Kulkarni 		else {	    /* for T5 */
3784*7e6ad469SVishal Kulkarni 			value = G_HASHTIDSIZE(value);
3785*7e6ad469SVishal Kulkarni 			value = 1 << value;
3786*7e6ad469SVishal Kulkarni 			tcam_region.max_tid = value +
3787*7e6ad469SVishal Kulkarni 				tcam_region.tid_hash_base;
3788*7e6ad469SVishal Kulkarni 		}
3789*7e6ad469SVishal Kulkarni 	} else	 /* hash not enabled */
3790*7e6ad469SVishal Kulkarni 		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3791*7e6ad469SVishal Kulkarni 
3792*7e6ad469SVishal Kulkarni 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3793*7e6ad469SVishal Kulkarni 	size += sizeof(struct cudbg_tcam);
3794*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3795*7e6ad469SVishal Kulkarni 
3796*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3797*7e6ad469SVishal Kulkarni 	if (rc)
3798*7e6ad469SVishal Kulkarni 		goto err;
3799*7e6ad469SVishal Kulkarni 
3800*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3801*7e6ad469SVishal Kulkarni 	if (rc)
3802*7e6ad469SVishal Kulkarni 		goto err;
3803*7e6ad469SVishal Kulkarni 
3804*7e6ad469SVishal Kulkarni 	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3805*7e6ad469SVishal Kulkarni 
3806*7e6ad469SVishal Kulkarni 	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3807*7e6ad469SVishal Kulkarni 					     scratch_buff.data) + 1);
3808*7e6ad469SVishal Kulkarni 	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3809*7e6ad469SVishal Kulkarni 	bytes = sizeof(struct cudbg_tcam);
3810*7e6ad469SVishal Kulkarni 
3811*7e6ad469SVishal Kulkarni 	/* read all tid */
3812*7e6ad469SVishal Kulkarni 	for (i = 0; i < tcam_region.max_tid; i++) {
3813*7e6ad469SVishal Kulkarni 		if (bytes_left < sizeof(struct cudbg_tid_data)) {
3814*7e6ad469SVishal Kulkarni 			scratch_buff.size = bytes;
3815*7e6ad469SVishal Kulkarni 			rc = compress_buff(&scratch_buff, dbg_buff);
3816*7e6ad469SVishal Kulkarni 			if (rc)
3817*7e6ad469SVishal Kulkarni 				goto err1;
3818*7e6ad469SVishal Kulkarni 			scratch_buff.size = CUDBG_CHUNK_SIZE;
3819*7e6ad469SVishal Kulkarni 			release_scratch_buff(&scratch_buff, dbg_buff);
3820*7e6ad469SVishal Kulkarni 
3821*7e6ad469SVishal Kulkarni 			/* new alloc */
3822*7e6ad469SVishal Kulkarni 			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3823*7e6ad469SVishal Kulkarni 					      &scratch_buff);
3824*7e6ad469SVishal Kulkarni 			if (rc)
3825*7e6ad469SVishal Kulkarni 				goto err;
3826*7e6ad469SVishal Kulkarni 
3827*7e6ad469SVishal Kulkarni 			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3828*7e6ad469SVishal Kulkarni 			bytes_left = CUDBG_CHUNK_SIZE;
3829*7e6ad469SVishal Kulkarni 			bytes = 0;
3830*7e6ad469SVishal Kulkarni 		}
3831*7e6ad469SVishal Kulkarni 
3832*7e6ad469SVishal Kulkarni 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
3833*7e6ad469SVishal Kulkarni 
3834*7e6ad469SVishal Kulkarni 		if (rc) {
3835*7e6ad469SVishal Kulkarni 			cudbg_err->sys_err = rc;
3836*7e6ad469SVishal Kulkarni 			goto err1;
3837*7e6ad469SVishal Kulkarni 		}
3838*7e6ad469SVishal Kulkarni 
3839*7e6ad469SVishal Kulkarni 		tid_data++;
3840*7e6ad469SVishal Kulkarni 		bytes_left -= sizeof(struct cudbg_tid_data);
3841*7e6ad469SVishal Kulkarni 		bytes += sizeof(struct cudbg_tid_data);
3842*7e6ad469SVishal Kulkarni 	}
3843*7e6ad469SVishal Kulkarni 
3844*7e6ad469SVishal Kulkarni 	if (bytes) {
3845*7e6ad469SVishal Kulkarni 		scratch_buff.size = bytes;
3846*7e6ad469SVishal Kulkarni 		rc = compress_buff(&scratch_buff, dbg_buff);
3847*7e6ad469SVishal Kulkarni 	}
3848*7e6ad469SVishal Kulkarni 
3849*7e6ad469SVishal Kulkarni err1:
3850*7e6ad469SVishal Kulkarni 	scratch_buff.size = CUDBG_CHUNK_SIZE;
3851*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3852*7e6ad469SVishal Kulkarni err:
3853*7e6ad469SVishal Kulkarni 	return rc;
3854*7e6ad469SVishal Kulkarni }
3855*7e6ad469SVishal Kulkarni 
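/*
 * Collect the MA indirect registers (T6 only) described by
 * t6_ma_ireg_array and t6_ma_ireg_array2.
 */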
3856*7e6ad469SVishal Kulkarni static int
3857*7e6ad469SVishal Kulkarni collect_ma_indirect(struct cudbg_init *pdbg_init,
3858*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
3859*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
3860*7e6ad469SVishal Kulkarni {
3861*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3862*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3863*7e6ad469SVishal Kulkarni 	struct ireg_buf *ma_indr = NULL;
3864*7e6ad469SVishal Kulkarni 	u32 size, j;
3865*7e6ad469SVishal Kulkarni 	int i, rc, n;
3866*7e6ad469SVishal Kulkarni 
3867*7e6ad469SVishal Kulkarni 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) {
3868*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
3869*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
3870*7e6ad469SVishal Kulkarni 					 "MA indirect available only in T6\n");
3871*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3872*7e6ad469SVishal Kulkarni 		goto err;
3873*7e6ad469SVishal Kulkarni 	}
3874*7e6ad469SVishal Kulkarni 
3875*7e6ad469SVishal Kulkarni 	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
3876*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * n * 2;
3877*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3878*7e6ad469SVishal Kulkarni 
3879*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3880*7e6ad469SVishal Kulkarni 	if (rc)
3881*7e6ad469SVishal Kulkarni 		goto err;
3882*7e6ad469SVishal Kulkarni 
3883*7e6ad469SVishal Kulkarni 	ma_indr = (struct ireg_buf *)scratch_buff.data;
3884*7e6ad469SVishal Kulkarni 
3885*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3886*7e6ad469SVishal Kulkarni 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3887*7e6ad469SVishal Kulkarni 		u32 *buff = ma_indr->outbuf;
3888*7e6ad469SVishal Kulkarni 
3889*7e6ad469SVishal Kulkarni 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3890*7e6ad469SVishal Kulkarni 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3891*7e6ad469SVishal Kulkarni 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3892*7e6ad469SVishal Kulkarni 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3893*7e6ad469SVishal Kulkarni 
3894*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3895*7e6ad469SVishal Kulkarni 				 buff, ma_fli->ireg_offset_range,
3896*7e6ad469SVishal Kulkarni 				 ma_fli->ireg_local_offset);
3897*7e6ad469SVishal Kulkarni 
3898*7e6ad469SVishal Kulkarni 		ma_indr++;
3900*7e6ad469SVishal Kulkarni 	}
3901*7e6ad469SVishal Kulkarni 
3902*7e6ad469SVishal Kulkarni 	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3903*7e6ad469SVishal Kulkarni 
3904*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3905*7e6ad469SVishal Kulkarni 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3906*7e6ad469SVishal Kulkarni 		u32 *buff = ma_indr->outbuf;
3907*7e6ad469SVishal Kulkarni 
3908*7e6ad469SVishal Kulkarni 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3909*7e6ad469SVishal Kulkarni 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3910*7e6ad469SVishal Kulkarni 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3911*7e6ad469SVishal Kulkarni 
3912*7e6ad469SVishal Kulkarni 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3913*7e6ad469SVishal Kulkarni 			t4_read_indirect(padap, ma_fli->ireg_addr,
3914*7e6ad469SVishal Kulkarni 					 ma_fli->ireg_data, buff, 1,
3915*7e6ad469SVishal Kulkarni 					 ma_fli->ireg_local_offset);
3916*7e6ad469SVishal Kulkarni 			buff++;
3917*7e6ad469SVishal Kulkarni 			ma_fli->ireg_local_offset += 0x20;
3918*7e6ad469SVishal Kulkarni 		}
3919*7e6ad469SVishal Kulkarni 		ma_indr++;
3920*7e6ad469SVishal Kulkarni 	}
3921*7e6ad469SVishal Kulkarni 
3922*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3923*7e6ad469SVishal Kulkarni 	if (rc)
3924*7e6ad469SVishal Kulkarni 		goto err1;
3925*7e6ad469SVishal Kulkarni 
3926*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3927*7e6ad469SVishal Kulkarni 
3928*7e6ad469SVishal Kulkarni err1:
3929*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3930*7e6ad469SVishal Kulkarni err:
3931*7e6ad469SVishal Kulkarni 	return rc;
3932*7e6ad469SVishal Kulkarni }
3933*7e6ad469SVishal Kulkarni 
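/*
 * Collect the HMA indirect registers (T6 only) described by
 * t6_hma_ireg_array.
 */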
3934*7e6ad469SVishal Kulkarni static int
3935*7e6ad469SVishal Kulkarni collect_hma_indirect(struct cudbg_init *pdbg_init,
3936*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
3937*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
3938*7e6ad469SVishal Kulkarni {
3939*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3940*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3941*7e6ad469SVishal Kulkarni 	struct ireg_buf *hma_indr = NULL;
3942*7e6ad469SVishal Kulkarni 	u32 size;
3943*7e6ad469SVishal Kulkarni 	int i, rc, n;
3944*7e6ad469SVishal Kulkarni 
3945*7e6ad469SVishal Kulkarni 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) {
3946*7e6ad469SVishal Kulkarni 		if (pdbg_init->verbose)
3947*7e6ad469SVishal Kulkarni 			pdbg_init->print(padap->dip, CE_NOTE,
3948*7e6ad469SVishal Kulkarni 					 "HMA indirect available only in T6\n");
3949*7e6ad469SVishal Kulkarni 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3950*7e6ad469SVishal Kulkarni 		goto err;
3951*7e6ad469SVishal Kulkarni 	}
3952*7e6ad469SVishal Kulkarni 
3953*7e6ad469SVishal Kulkarni 	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3954*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * n;
3955*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
3956*7e6ad469SVishal Kulkarni 
3957*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3958*7e6ad469SVishal Kulkarni 	if (rc)
3959*7e6ad469SVishal Kulkarni 		goto err;
3960*7e6ad469SVishal Kulkarni 
3961*7e6ad469SVishal Kulkarni 	hma_indr = (struct ireg_buf *)scratch_buff.data;
3962*7e6ad469SVishal Kulkarni 
3963*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
3964*7e6ad469SVishal Kulkarni 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
3965*7e6ad469SVishal Kulkarni 		u32 *buff = hma_indr->outbuf;
3966*7e6ad469SVishal Kulkarni 
3967*7e6ad469SVishal Kulkarni 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3968*7e6ad469SVishal Kulkarni 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3969*7e6ad469SVishal Kulkarni 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3970*7e6ad469SVishal Kulkarni 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3971*7e6ad469SVishal Kulkarni 
3972*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3973*7e6ad469SVishal Kulkarni 				 buff, hma_fli->ireg_offset_range,
3974*7e6ad469SVishal Kulkarni 				 hma_fli->ireg_local_offset);
3975*7e6ad469SVishal Kulkarni 
3976*7e6ad469SVishal Kulkarni 		hma_indr++;
3978*7e6ad469SVishal Kulkarni 	}
3979*7e6ad469SVishal Kulkarni 
3980*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3981*7e6ad469SVishal Kulkarni 	if (rc)
3982*7e6ad469SVishal Kulkarni 		goto err1;
3983*7e6ad469SVishal Kulkarni 
3984*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
3985*7e6ad469SVishal Kulkarni 
3986*7e6ad469SVishal Kulkarni err1:
3987*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
3988*7e6ad469SVishal Kulkarni err:
3989*7e6ad469SVishal Kulkarni 	return rc;
3990*7e6ad469SVishal Kulkarni }
3991*7e6ad469SVishal Kulkarni 
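/*
 * Collect the PCIE_PDBG and PCIE_CDBG indirect register windows listed in
 * t5_pcie_pdbg_array and t5_pcie_cdbg_array.
 */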
3992*7e6ad469SVishal Kulkarni static int
3993*7e6ad469SVishal Kulkarni collect_pcie_indirect(struct cudbg_init *pdbg_init,
3994*7e6ad469SVishal Kulkarni 		      struct cudbg_buffer *dbg_buff,
3995*7e6ad469SVishal Kulkarni 		      struct cudbg_error *cudbg_err)
3996*7e6ad469SVishal Kulkarni {
3997*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
3998*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
3999*7e6ad469SVishal Kulkarni 	struct ireg_buf *ch_pcie;
4000*7e6ad469SVishal Kulkarni 	u32 size;
4001*7e6ad469SVishal Kulkarni 	int i, rc, n;
4002*7e6ad469SVishal Kulkarni 
4003*7e6ad469SVishal Kulkarni 	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
4004*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * n * 2;
4005*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
4006*7e6ad469SVishal Kulkarni 
4007*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4008*7e6ad469SVishal Kulkarni 	if (rc)
4009*7e6ad469SVishal Kulkarni 		goto err;
4010*7e6ad469SVishal Kulkarni 
4011*7e6ad469SVishal Kulkarni 	ch_pcie = (struct ireg_buf *)scratch_buff.data;
4012*7e6ad469SVishal Kulkarni 
4013*7e6ad469SVishal Kulkarni 	/* PCIE_PDBG */
4014*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
4015*7e6ad469SVishal Kulkarni 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4016*7e6ad469SVishal Kulkarni 		u32 *buff = ch_pcie->outbuf;
4017*7e6ad469SVishal Kulkarni 
4018*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4019*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4020*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4021*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4022*7e6ad469SVishal Kulkarni 
4023*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap,
4024*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_addr,
4025*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_data,
4026*7e6ad469SVishal Kulkarni 				buff,
4027*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_offset_range,
4028*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_local_offset);
4029*7e6ad469SVishal Kulkarni 
4030*7e6ad469SVishal Kulkarni 		ch_pcie++;
4031*7e6ad469SVishal Kulkarni 	}
4032*7e6ad469SVishal Kulkarni 
4033*7e6ad469SVishal Kulkarni 	/* PCIE_CDBG */
4034*7e6ad469SVishal Kulkarni 	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4035*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
4036*7e6ad469SVishal Kulkarni 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4037*7e6ad469SVishal Kulkarni 		u32 *buff = ch_pcie->outbuf;
4038*7e6ad469SVishal Kulkarni 
4039*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4040*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4041*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4042*7e6ad469SVishal Kulkarni 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4043*7e6ad469SVishal Kulkarni 
4044*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap,
4045*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_addr,
4046*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_data,
4047*7e6ad469SVishal Kulkarni 				buff,
4048*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_offset_range,
4049*7e6ad469SVishal Kulkarni 				pcie_pio->ireg_local_offset);
4050*7e6ad469SVishal Kulkarni 
4051*7e6ad469SVishal Kulkarni 		ch_pcie++;
4052*7e6ad469SVishal Kulkarni 	}
4053*7e6ad469SVishal Kulkarni 
4054*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4055*7e6ad469SVishal Kulkarni 	if (rc)
4056*7e6ad469SVishal Kulkarni 		goto err1;
4057*7e6ad469SVishal Kulkarni 
4058*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
4059*7e6ad469SVishal Kulkarni 
4060*7e6ad469SVishal Kulkarni err1:
4061*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
4062*7e6ad469SVishal Kulkarni err:
4063*7e6ad469SVishal Kulkarni 	return rc;
4065*7e6ad469SVishal Kulkarni }
4066*7e6ad469SVishal Kulkarni 
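/*
 * Collect the TP_PIO, TP_TM_PIO and TP_MIB_INDEX indirect registers,
 * selecting the T5 or T6 register tables as appropriate.
 */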
4067*7e6ad469SVishal Kulkarni static int
4068*7e6ad469SVishal Kulkarni collect_tp_indirect(struct cudbg_init *pdbg_init,
4069*7e6ad469SVishal Kulkarni 		    struct cudbg_buffer *dbg_buff,
4070*7e6ad469SVishal Kulkarni 		    struct cudbg_error *cudbg_err)
4071*7e6ad469SVishal Kulkarni {
4072*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
4073*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
4074*7e6ad469SVishal Kulkarni 	struct ireg_buf *ch_tp_pio;
4075*7e6ad469SVishal Kulkarni 	u32 size;
4076*7e6ad469SVishal Kulkarni 	int i, rc, n = 0;
4077*7e6ad469SVishal Kulkarni 
4078*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip))
4079*7e6ad469SVishal Kulkarni 		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4080*7e6ad469SVishal Kulkarni 	else if (is_t6(padap->params.chip))
4081*7e6ad469SVishal Kulkarni 		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4082*7e6ad469SVishal Kulkarni 
4083*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * n * 3;
4084*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
4085*7e6ad469SVishal Kulkarni 
4086*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4087*7e6ad469SVishal Kulkarni 	if (rc)
4088*7e6ad469SVishal Kulkarni 		goto err;
4089*7e6ad469SVishal Kulkarni 
4090*7e6ad469SVishal Kulkarni 	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4091*7e6ad469SVishal Kulkarni 
4092*7e6ad469SVishal Kulkarni 	/* TP_PIO */
4093*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
4094*7e6ad469SVishal Kulkarni 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4095*7e6ad469SVishal Kulkarni 		u32 *buff = ch_tp_pio->outbuf;
4096*7e6ad469SVishal Kulkarni 
4097*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
4098*7e6ad469SVishal Kulkarni 			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4099*7e6ad469SVishal Kulkarni 			tp_pio->ireg_data = t5_tp_pio_array[i][1];
4100*7e6ad469SVishal Kulkarni 			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4101*7e6ad469SVishal Kulkarni 			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4102*7e6ad469SVishal Kulkarni 		} else if (is_t6(padap->params.chip)) {
4103*7e6ad469SVishal Kulkarni 			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4104*7e6ad469SVishal Kulkarni 			tp_pio->ireg_data = t6_tp_pio_array[i][1];
4105*7e6ad469SVishal Kulkarni 			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4106*7e6ad469SVishal Kulkarni 			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4107*7e6ad469SVishal Kulkarni 		}
4108*7e6ad469SVishal Kulkarni 
4109*7e6ad469SVishal Kulkarni 		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4110*7e6ad469SVishal Kulkarni 			       tp_pio->ireg_local_offset, true);
4111*7e6ad469SVishal Kulkarni 
4112*7e6ad469SVishal Kulkarni 		ch_tp_pio++;
4113*7e6ad469SVishal Kulkarni 	}
4114*7e6ad469SVishal Kulkarni 
4115*7e6ad469SVishal Kulkarni 	/* TP_TM_PIO */
4116*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip))
4117*7e6ad469SVishal Kulkarni 		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4118*7e6ad469SVishal Kulkarni 	else if (is_t6(padap->params.chip))
4119*7e6ad469SVishal Kulkarni 		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4120*7e6ad469SVishal Kulkarni 
4121*7e6ad469SVishal Kulkarni 	for (i = 0; i < n; i++) {
4122*7e6ad469SVishal Kulkarni 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4123*7e6ad469SVishal Kulkarni 		u32 *buff = ch_tp_pio->outbuf;
4124*7e6ad469SVishal Kulkarni 
4125*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
4126*7e6ad469SVishal Kulkarni 			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4127*7e6ad469SVishal Kulkarni 			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4128*7e6ad469SVishal Kulkarni 			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4129*7e6ad469SVishal Kulkarni 			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4130*7e6ad469SVishal Kulkarni 		} else if (is_t6(padap->params.chip)) {
4131*7e6ad469SVishal Kulkarni 			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4132*7e6ad469SVishal Kulkarni 			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4133*7e6ad469SVishal Kulkarni 			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4134*7e6ad469SVishal Kulkarni 			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4135*7e6ad469SVishal Kulkarni 		}
4136*7e6ad469SVishal Kulkarni 
4137*7e6ad469SVishal Kulkarni 		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4138*7e6ad469SVishal Kulkarni 				  tp_pio->ireg_local_offset, true);
4139*7e6ad469SVishal Kulkarni 
4140*7e6ad469SVishal Kulkarni 		ch_tp_pio++;
4141*7e6ad469SVishal Kulkarni 	}
4142*7e6ad469SVishal Kulkarni 
4143*7e6ad469SVishal Kulkarni 	/* TP_MIB_INDEX */
4144*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip))
4145*7e6ad469SVishal Kulkarni 		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4146*7e6ad469SVishal Kulkarni 	else if (is_t6(padap->params.chip))
4147*7e6ad469SVishal Kulkarni 		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4148*7e6ad469SVishal Kulkarni 
4149*7e6ad469SVishal Kulkarni 	for (i = 0; i < n ; i++) {
4150*7e6ad469SVishal Kulkarni 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4151*7e6ad469SVishal Kulkarni 		u32 *buff = ch_tp_pio->outbuf;
4152*7e6ad469SVishal Kulkarni 
4153*7e6ad469SVishal Kulkarni 		if (is_t5(padap->params.chip)) {
4154*7e6ad469SVishal Kulkarni 			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4155*7e6ad469SVishal Kulkarni 			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4156*7e6ad469SVishal Kulkarni 			tp_pio->ireg_local_offset =
4157*7e6ad469SVishal Kulkarni 				t5_tp_mib_index_array[i][2];
4158*7e6ad469SVishal Kulkarni 			tp_pio->ireg_offset_range =
4159*7e6ad469SVishal Kulkarni 				t5_tp_mib_index_array[i][3];
4160*7e6ad469SVishal Kulkarni 		} else if (is_t6(padap->params.chip)) {
4161*7e6ad469SVishal Kulkarni 			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4162*7e6ad469SVishal Kulkarni 			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4163*7e6ad469SVishal Kulkarni 			tp_pio->ireg_local_offset =
4164*7e6ad469SVishal Kulkarni 				t6_tp_mib_index_array[i][2];
4165*7e6ad469SVishal Kulkarni 			tp_pio->ireg_offset_range =
4166*7e6ad469SVishal Kulkarni 				t6_tp_mib_index_array[i][3];
4167*7e6ad469SVishal Kulkarni 		}
4168*7e6ad469SVishal Kulkarni 
4169*7e6ad469SVishal Kulkarni 		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4170*7e6ad469SVishal Kulkarni 			       tp_pio->ireg_local_offset, true);
4171*7e6ad469SVishal Kulkarni 
4172*7e6ad469SVishal Kulkarni 		ch_tp_pio++;
4173*7e6ad469SVishal Kulkarni 	}
4174*7e6ad469SVishal Kulkarni 
4175*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4176*7e6ad469SVishal Kulkarni 	if (rc)
4177*7e6ad469SVishal Kulkarni 		goto err1;
4178*7e6ad469SVishal Kulkarni 
4179*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
4180*7e6ad469SVishal Kulkarni 
4181*7e6ad469SVishal Kulkarni err1:
4182*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
4183*7e6ad469SVishal Kulkarni err:
4184*7e6ad469SVishal Kulkarni 	return rc;
4185*7e6ad469SVishal Kulkarni }
4186*7e6ad469SVishal Kulkarni 
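/*
 * collect_sge_indirect() dumps the two SGE debug register windows listed in
 * t5_sge_dbg_index_array.  Each window is read through its indirect
 * address/data register pair into an ireg_buf, and the resulting pair of
 * buffers is then compressed into the debug dump.
 */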
4187*7e6ad469SVishal Kulkarni static int
4188*7e6ad469SVishal Kulkarni collect_sge_indirect(struct cudbg_init *pdbg_init,
4189*7e6ad469SVishal Kulkarni 		     struct cudbg_buffer *dbg_buff,
4190*7e6ad469SVishal Kulkarni 		     struct cudbg_error *cudbg_err)
4191*7e6ad469SVishal Kulkarni {
4192*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
4193*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
4194*7e6ad469SVishal Kulkarni 	struct ireg_buf *ch_sge_dbg;
4195*7e6ad469SVishal Kulkarni 	u32 size;
4196*7e6ad469SVishal Kulkarni 	int i, rc;
4197*7e6ad469SVishal Kulkarni 
4198*7e6ad469SVishal Kulkarni 	size = sizeof(struct ireg_buf) * 2;
4199*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
4200*7e6ad469SVishal Kulkarni 
4201*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4202*7e6ad469SVishal Kulkarni 	if (rc)
4203*7e6ad469SVishal Kulkarni 		goto err;
4204*7e6ad469SVishal Kulkarni 
4205*7e6ad469SVishal Kulkarni 	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4206*7e6ad469SVishal Kulkarni 
4207*7e6ad469SVishal Kulkarni 	for (i = 0; i < 2; i++) {
4208*7e6ad469SVishal Kulkarni 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4209*7e6ad469SVishal Kulkarni 		u32 *buff = ch_sge_dbg->outbuf;
4210*7e6ad469SVishal Kulkarni 
4211*7e6ad469SVishal Kulkarni 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4212*7e6ad469SVishal Kulkarni 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4213*7e6ad469SVishal Kulkarni 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4214*7e6ad469SVishal Kulkarni 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4215*7e6ad469SVishal Kulkarni 
4216*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap,
4217*7e6ad469SVishal Kulkarni 				sge_pio->ireg_addr,
4218*7e6ad469SVishal Kulkarni 				sge_pio->ireg_data,
4219*7e6ad469SVishal Kulkarni 				buff,
4220*7e6ad469SVishal Kulkarni 				sge_pio->ireg_offset_range,
4221*7e6ad469SVishal Kulkarni 				sge_pio->ireg_local_offset);
4222*7e6ad469SVishal Kulkarni 
4223*7e6ad469SVishal Kulkarni 		ch_sge_dbg++;
4224*7e6ad469SVishal Kulkarni 	}
4225*7e6ad469SVishal Kulkarni 
4226*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4227*7e6ad469SVishal Kulkarni 	if (rc)
4228*7e6ad469SVishal Kulkarni 		goto err1;
4229*7e6ad469SVishal Kulkarni 
4230*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
4231*7e6ad469SVishal Kulkarni 
4232*7e6ad469SVishal Kulkarni err1:
4233*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
4234*7e6ad469SVishal Kulkarni err:
4235*7e6ad469SVishal Kulkarni 	return rc;
4236*7e6ad469SVishal Kulkarni }
4237*7e6ad469SVishal Kulkarni 
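/*
 * collect_full() samples a small fixed set of TP, PCIE and SGE debug
 * registers (six 32-bit words on T5, seven on T6) into a scratch buffer
 * and compresses the result.  The individual registers are listed in the
 * comment at the top of the function body.
 */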
4238*7e6ad469SVishal Kulkarni static int
4239*7e6ad469SVishal Kulkarni collect_full(struct cudbg_init *pdbg_init,
4240*7e6ad469SVishal Kulkarni 	     struct cudbg_buffer *dbg_buff,
4241*7e6ad469SVishal Kulkarni 	     struct cudbg_error *cudbg_err)
4242*7e6ad469SVishal Kulkarni {
4243*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
4244*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
4245*7e6ad469SVishal Kulkarni 	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4246*7e6ad469SVishal Kulkarni 	u32 *sp;
4247*7e6ad469SVishal Kulkarni 	int rc;
4248*7e6ad469SVishal Kulkarni 	int nreg = 0;
4249*7e6ad469SVishal Kulkarni 
4250*7e6ad469SVishal Kulkarni 	/* Collect Registers:
4251*7e6ad469SVishal Kulkarni 	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4252*7e6ad469SVishal Kulkarni 	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4253*7e6ad469SVishal Kulkarni 	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4254*7e6ad469SVishal Kulkarni 	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4255*7e6ad469SVishal Kulkarni 	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4256*7e6ad469SVishal Kulkarni 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3) (T6 only),
4257*7e6ad469SVishal Kulkarni 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4258*7e6ad469SVishal Kulkarni 	 */
4259*7e6ad469SVishal Kulkarni 
4260*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip))
4261*7e6ad469SVishal Kulkarni 		nreg = 6;
4262*7e6ad469SVishal Kulkarni 	else if (is_t6(padap->params.chip))
4263*7e6ad469SVishal Kulkarni 		nreg = 7;
4264*7e6ad469SVishal Kulkarni 
4265*7e6ad469SVishal Kulkarni 	scratch_buff.size = nreg * sizeof(u32);
4266*7e6ad469SVishal Kulkarni 
4267*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4268*7e6ad469SVishal Kulkarni 	if (rc)
4269*7e6ad469SVishal Kulkarni 		goto err;
4270*7e6ad469SVishal Kulkarni 
4271*7e6ad469SVishal Kulkarni 	sp = (u32 *)scratch_buff.data;
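	/* sp walks the scratch buffer, one 32-bit word per register read. */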
4272*7e6ad469SVishal Kulkarni 
4273*7e6ad469SVishal Kulkarni 	/* TP_DBG_SCHED_TX */
4274*7e6ad469SVishal Kulkarni 	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4275*7e6ad469SVishal Kulkarni 	reg_offset_range = 1;
4276*7e6ad469SVishal Kulkarni 
4277*7e6ad469SVishal Kulkarni 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4278*7e6ad469SVishal Kulkarni 
4279*7e6ad469SVishal Kulkarni 	sp++;
4280*7e6ad469SVishal Kulkarni 
4281*7e6ad469SVishal Kulkarni 	/* TP_DBG_SCHED_RX */
4282*7e6ad469SVishal Kulkarni 	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4283*7e6ad469SVishal Kulkarni 	reg_offset_range = 1;
4284*7e6ad469SVishal Kulkarni 
4285*7e6ad469SVishal Kulkarni 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4286*7e6ad469SVishal Kulkarni 
4287*7e6ad469SVishal Kulkarni 	sp++;
4288*7e6ad469SVishal Kulkarni 
4289*7e6ad469SVishal Kulkarni 	/* TP_DBG_CSIDE_INT */
4290*7e6ad469SVishal Kulkarni 	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4291*7e6ad469SVishal Kulkarni 	reg_offset_range = 1;
4292*7e6ad469SVishal Kulkarni 
4293*7e6ad469SVishal Kulkarni 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4294*7e6ad469SVishal Kulkarni 
4295*7e6ad469SVishal Kulkarni 	sp++;
4296*7e6ad469SVishal Kulkarni 
4297*7e6ad469SVishal Kulkarni 	/* TP_DBG_ESIDE_INT */
4298*7e6ad469SVishal Kulkarni 	reg_local_offset = t5_tp_pio_array[8][2] + 3;
4299*7e6ad469SVishal Kulkarni 	reg_offset_range = 1;
4300*7e6ad469SVishal Kulkarni 
4301*7e6ad469SVishal Kulkarni 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4302*7e6ad469SVishal Kulkarni 
4303*7e6ad469SVishal Kulkarni 	sp++;
4304*7e6ad469SVishal Kulkarni 
4305*7e6ad469SVishal Kulkarni 	/* PCIE_CDEBUG_INDEX[AppData0] */
4306*7e6ad469SVishal Kulkarni 	reg_addr = t5_pcie_cdbg_array[0][0];
4307*7e6ad469SVishal Kulkarni 	reg_data = t5_pcie_cdbg_array[0][1];
4308*7e6ad469SVishal Kulkarni 	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4309*7e6ad469SVishal Kulkarni 	reg_offset_range = 1;
4310*7e6ad469SVishal Kulkarni 
4311*7e6ad469SVishal Kulkarni 	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4312*7e6ad469SVishal Kulkarni 			 reg_local_offset);
4313*7e6ad469SVishal Kulkarni 
4314*7e6ad469SVishal Kulkarni 	sp++;
4315*7e6ad469SVishal Kulkarni 
4316*7e6ad469SVishal Kulkarni 	if (is_t6(padap->params.chip)) {
4317*7e6ad469SVishal Kulkarni 		/* PCIE_CDEBUG_INDEX[AppData1] */
4318*7e6ad469SVishal Kulkarni 		reg_addr = t5_pcie_cdbg_array[0][0];
4319*7e6ad469SVishal Kulkarni 		reg_data = t5_pcie_cdbg_array[0][1];
4320*7e6ad469SVishal Kulkarni 		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4321*7e6ad469SVishal Kulkarni 		reg_offset_range = 1;
4322*7e6ad469SVishal Kulkarni 
4323*7e6ad469SVishal Kulkarni 		t4_read_indirect(padap, reg_addr, reg_data, sp,
4324*7e6ad469SVishal Kulkarni 				 reg_offset_range, reg_local_offset);
4325*7e6ad469SVishal Kulkarni 
4326*7e6ad469SVishal Kulkarni 		sp++;
4327*7e6ad469SVishal Kulkarni 	}
4328*7e6ad469SVishal Kulkarni 
4329*7e6ad469SVishal Kulkarni 	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4330*7e6ad469SVishal Kulkarni 	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4331*7e6ad469SVishal Kulkarni 
4332*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4333*7e6ad469SVishal Kulkarni 	if (rc)
4334*7e6ad469SVishal Kulkarni 		goto err1;
4335*7e6ad469SVishal Kulkarni 
4336*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
4337*7e6ad469SVishal Kulkarni 
4338*7e6ad469SVishal Kulkarni err1:
4339*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
4340*7e6ad469SVishal Kulkarni err:
4341*7e6ad469SVishal Kulkarni 	return rc;
4342*7e6ad469SVishal Kulkarni }
4343*7e6ad469SVishal Kulkarni 
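/*
 * collect_vpd_data() is meant to capture the adapter's VPD fields (sn, bn,
 * na, mn) together with the serial config, VPD and firmware version numbers.
 * The body is guarded by #ifdef notyet, so with that symbol undefined the
 * routine simply returns failure.
 */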
4344*7e6ad469SVishal Kulkarni static int
4345*7e6ad469SVishal Kulkarni collect_vpd_data(struct cudbg_init *pdbg_init,
4346*7e6ad469SVishal Kulkarni 		 struct cudbg_buffer *dbg_buff,
4347*7e6ad469SVishal Kulkarni 		 struct cudbg_error *cudbg_err)
4348*7e6ad469SVishal Kulkarni {
4349*7e6ad469SVishal Kulkarni #ifdef notyet
4350*7e6ad469SVishal Kulkarni 	struct cudbg_buffer scratch_buff;
4351*7e6ad469SVishal Kulkarni 	struct adapter *padap = pdbg_init->adap;
4352*7e6ad469SVishal Kulkarni 	struct struct_vpd_data *vpd_data;
4353*7e6ad469SVishal Kulkarni 	char vpd_ver[4];
4354*7e6ad469SVishal Kulkarni 	u32 fw_vers;
4355*7e6ad469SVishal Kulkarni 	u32 size;
4356*7e6ad469SVishal Kulkarni 	int rc;
4357*7e6ad469SVishal Kulkarni 
4358*7e6ad469SVishal Kulkarni 	size = sizeof(struct struct_vpd_data);
4359*7e6ad469SVishal Kulkarni 	scratch_buff.size = size;
4360*7e6ad469SVishal Kulkarni 
4361*7e6ad469SVishal Kulkarni 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4362*7e6ad469SVishal Kulkarni 	if (rc)
4363*7e6ad469SVishal Kulkarni 		goto err;
4364*7e6ad469SVishal Kulkarni 
4365*7e6ad469SVishal Kulkarni 	vpd_data = (struct struct_vpd_data *)scratch_buff.data;
4366*7e6ad469SVishal Kulkarni 
4367*7e6ad469SVishal Kulkarni 	if (is_t5(padap->params.chip)) {
4368*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
4369*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
4370*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
4371*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
4372*7e6ad469SVishal Kulkarni 	} else if (is_t6(padap->params.chip)) {
4373*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
4374*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
4375*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
4376*7e6ad469SVishal Kulkarni 		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
4377*7e6ad469SVishal Kulkarni 	}
4378*7e6ad469SVishal Kulkarni 
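	/*
	 * Prefer querying the serial config and VPD versions through the
	 * firmware; if the firmware is not attached, or the query fails,
	 * fall back to the backdoor VPD reads below.
	 */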
4379*7e6ad469SVishal Kulkarni 	if (is_fw_attached(pdbg_init)) {
4380*7e6ad469SVishal Kulkarni 		rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
4381*7e6ad469SVishal Kulkarni 	} else {
4382*7e6ad469SVishal Kulkarni 		rc = 1;
4383*7e6ad469SVishal Kulkarni 	}
4384*7e6ad469SVishal Kulkarni 
4385*7e6ad469SVishal Kulkarni 	if (rc) {
4386*7e6ad469SVishal Kulkarni 		/* Now trying with backdoor mechanism */
4387*7e6ad469SVishal Kulkarni 		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
4388*7e6ad469SVishal Kulkarni 				  (u8 *)&vpd_data->scfg_vers);
4389*7e6ad469SVishal Kulkarni 		if (rc)
4390*7e6ad469SVishal Kulkarni 			goto err1;
4391*7e6ad469SVishal Kulkarni 	}
4392*7e6ad469SVishal Kulkarni 
4393*7e6ad469SVishal Kulkarni 	if (is_fw_attached(pdbg_init)) {
4394*7e6ad469SVishal Kulkarni 		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
4395*7e6ad469SVishal Kulkarni 	} else {
4396*7e6ad469SVishal Kulkarni 		rc = 1;
4397*7e6ad469SVishal Kulkarni 	}
4398*7e6ad469SVishal Kulkarni 
4399*7e6ad469SVishal Kulkarni 	if (rc) {
4400*7e6ad469SVishal Kulkarni 		/* Now trying with backdoor mechanism */
4401*7e6ad469SVishal Kulkarni 		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
4402*7e6ad469SVishal Kulkarni 				  (u8 *)vpd_ver);
4403*7e6ad469SVishal Kulkarni 		if (rc)
4404*7e6ad469SVishal Kulkarni 			goto err1;
4405*7e6ad469SVishal Kulkarni 		/* read_vpd_reg returns the stored hex digits as a string;
4406*7e6ad469SVishal Kulkarni 		 * convert the hex string to a proper char string and parse
4407*7e6ad469SVishal Kulkarni 		 * it.  The VPD version is only 2 bytes. */
4408*7e6ad469SVishal Kulkarni 		sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
4409*7e6ad469SVishal Kulkarni 		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
4410*7e6ad469SVishal Kulkarni 	}
4411*7e6ad469SVishal Kulkarni 
4412*7e6ad469SVishal Kulkarni 	/* Get FW version if it's not already filled in */
4413*7e6ad469SVishal Kulkarni 	fw_vers = padap->params.fw_vers;
4414*7e6ad469SVishal Kulkarni 	if (!fw_vers) {
4415*7e6ad469SVishal Kulkarni 		rc = t4_get_fw_version(padap, &fw_vers);
4416*7e6ad469SVishal Kulkarni 		if (rc)
4417*7e6ad469SVishal Kulkarni 			goto err1;
4418*7e6ad469SVishal Kulkarni 	}
4419*7e6ad469SVishal Kulkarni 
4420*7e6ad469SVishal Kulkarni 	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
4421*7e6ad469SVishal Kulkarni 	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
4422*7e6ad469SVishal Kulkarni 	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
4423*7e6ad469SVishal Kulkarni 	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
4424*7e6ad469SVishal Kulkarni 
4425*7e6ad469SVishal Kulkarni 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4426*7e6ad469SVishal Kulkarni 	if (rc)
4427*7e6ad469SVishal Kulkarni 		goto err1;
4428*7e6ad469SVishal Kulkarni 
4429*7e6ad469SVishal Kulkarni 	rc = compress_buff(&scratch_buff, dbg_buff);
4430*7e6ad469SVishal Kulkarni 
4431*7e6ad469SVishal Kulkarni err1:
4432*7e6ad469SVishal Kulkarni 	release_scratch_buff(&scratch_buff, dbg_buff);
4433*7e6ad469SVishal Kulkarni err:
4434*7e6ad469SVishal Kulkarni 	return rc;
4435*7e6ad469SVishal Kulkarni #endif
4436*7e6ad469SVishal Kulkarni 	return (-1);
4437*7e6ad469SVishal Kulkarni }
4438