/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/types.h>
#include <px_err.h>

#include "fabric-xlate.h"

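/*
 * EPKT_DESC packs the five rc_descr fields (block, op, phase, condition,
 * direction) into a single 20-bit key so that a received descriptor can be
 * matched against fab_epkt_tbl with one compare.
 */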
#define	EPKT_DESC(b, o, p, c, d) (BLOCK_##b << 16 | OP_##o << 12 | \
    PH_##p << 8 | CND_##c << 4 | DIR_##d)

/* EPKT Table used only for RC/RP errors */
typedef struct fab_epkt_tbl {
	uint32_t	epkt_desc;
	uint32_t	pcie_ue_sts;	/* Equivalent PCIe UE Status */
	uint16_t	pci_err_sts;	/* Equivalent PCI Error Status */
	uint16_t	pci_bdg_sts;	/* Equivalent PCI Bridge Status */
	const char	*tgt_class;	/* Target Ereport Class */
} fab_epkt_tbl_t;

static fab_epkt_tbl_t fab_epkt_tbl[] = {
	EPKT_DESC(MMU, XLAT, DATA, INV, RDWR),
	PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB, 0,
	EPKT_DESC(MMU, XLAT, ADDR, UNMAP, RDWR),
	PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB, 0,
	EPKT_DESC(MMU, XLAT, DATA, PROT, RDWR),
	PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB, 0,

	EPKT_DESC(INTR, MSI32, DATA, ILL, IRR),
	PCIE_AER_UCE_MTLP, PCI_STAT_S_SYSERR, 0, 0,

	EPKT_DESC(PORT, PIO, IRR, RCA, WRITE),
	0, PCI_STAT_S_SYSERR, PCI_STAT_R_TARG_AB, 0,

	EPKT_DESC(PORT, PIO, IRR, RUR, WRITE),
	0, PCI_STAT_S_SYSERR, PCI_STAT_R_MAST_AB, 0,

	EPKT_DESC(PORT, PIO, IRR, INV, RDWR),
	PCIE_AER_UCE_MTLP, PCI_STAT_S_SYSERR, 0, 0,

	EPKT_DESC(PORT, PIO, IRR, TO, READ),
	PCIE_AER_UCE_TO, PCI_STAT_S_SYSERR, 0, PCI_TARG_MA,
	EPKT_DESC(PORT, PIO, IRR, TO, WRITE),
	PCIE_AER_UCE_TO, PCI_STAT_S_SYSERR, 0, PCI_TARG_MA,

	EPKT_DESC(PORT, PIO, IRR, UC, IRR),
	PCIE_AER_UCE_UC, PCI_STAT_S_SYSERR, 0, 0,

	EPKT_DESC(PORT, LINK, FC, TO, IRR),
	PCIE_AER_UCE_FCP, PCI_STAT_S_SYSERR, 0, 0,

	0, 0, 0, 0, 0
};

/* ARGSUSED */
void
fab_epkt_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data)
{
	data->nvl = nvl;

	/* Always Root Complex */
	data->dev_type = PCIE_PCIECAP_DEV_TYPE_ROOT;

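	/*
	 * UE severity mask: DLP, SD, FCP, RO and MTLP are the errors that
	 * the PCIe AER UE Severity register flags as fatal by default.
	 */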
	data->pcie_ue_sev = (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |
	    PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP);
}

static int
fab_xlate_epkt(fmd_hdl_t *hdl, fab_data_t *data, px_rc_err_t *epktp)
{
	fab_epkt_tbl_t *entry;
	uint32_t temp;

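	/*
	 * The upper 20 bits of rc_descr hold the block/op/phase/condition/
	 * direction fields, so shifting the raw descriptor right by 12
	 * yields a value directly comparable with the EPKT_DESC() keys.
	 */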
	for (entry = fab_epkt_tbl; entry->epkt_desc != 0; entry++) {
		temp = *(uint32_t *)&epktp->rc_descr >> 12;
		if (entry->epkt_desc == temp)
			goto send;
	}

	return (0);

send:
	fmd_hdl_debug(hdl, "Translate epkt DESC = %#x\n", temp);

	/* Fill in PCI Status Register */
	data->pci_err_status = entry->pci_err_sts;
	data->pci_bdg_sec_stat = entry->pci_bdg_sts;

	/* Fill in the device status register */
	if (epktp->rc_descr.STOP)
		data->pcie_err_status = PCIE_DEVSTS_FE_DETECTED;
	else if (epktp->rc_descr.C)
		data->pcie_err_status = PCIE_DEVSTS_CE_DETECTED;
	else
		data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;

	/* Fill in the AER UE register */
	data->pcie_ue_status = entry->pcie_ue_sts;

	/* Fill in the AER Control register */
	temp = entry->pcie_ue_sts;
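	/*
	 * Compute the bit position of the highest set UE status bit; this
	 * is used as the first-error-pointer value in the AER control
	 * register.
	 */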
	for (data->pcie_adv_ctl = (uint32_t)-1; temp; data->pcie_adv_ctl++)
		temp = temp >> 1;

	/* Send target ereports */
	data->pcie_ue_no_tgt_erpt = B_TRUE;
	if (entry->tgt_class && !epktp->rc_descr.STOP) {
		if (epktp->rc_descr.D) {
			data->pcie_ue_tgt_trans = PF_ADDR_DMA;
			data->pcie_ue_tgt_addr = epktp->addr;
		} else if (epktp->rc_descr.M) {
			data->pcie_ue_tgt_trans = PF_ADDR_PIO;
			data->pcie_ue_tgt_addr = epktp->addr;
		}

		if (data->pcie_ue_tgt_trans)
			fab_send_tgt_erpt(hdl, data, entry->tgt_class,
			    B_TRUE);
	}
	return (1);
}

void
fab_xlate_epkt_erpts(fmd_hdl_t *hdl, nvlist_t *nvl, const char *class)
{
	fab_data_t data = {0};
	px_rc_err_t epkt = {0};
	pcie_tlp_hdr_t *tlp_hdr;
	void *ptr;
	uint8_t ver;
	int err;
	char *devpath, *rppath = NULL;
	nvlist_t *detector;

	fmd_hdl_debug(hdl, "epkt ereport received: %s\n", class);
	fab_epkt_to_data(hdl, nvl, &data);

	err = nvlist_lookup_uint8(nvl, "epkt_ver", &ver);
	err |= nvlist_lookup_uint32(nvl, "desc", (uint32_t *)&epkt.rc_descr);
	err |= nvlist_lookup_uint32(nvl, "size", &epkt.size);
	err |= nvlist_lookup_uint64(nvl, "addr", &epkt.addr);
	err |= nvlist_lookup_uint64(nvl, "hdr1", &epkt.hdr[0]);
	err |= nvlist_lookup_uint64(nvl, "hdr2", &epkt.hdr[1]);
	err |= nvlist_lookup_uint64(nvl, "reserved", &epkt.reserved);

	if (err != 0) {
		fmd_hdl_debug(hdl, "Failed to retrieve all epkt payloads");
		return;
	}

	fmd_hdl_debug(hdl, "epkt flags: %c%c%c%c%c%c%c%c%c %s",
	    epkt.rc_descr.S ? 'S' : '-', epkt.rc_descr.M ? 'M' : '-',
	    epkt.rc_descr.Q ? 'Q' : '-', epkt.rc_descr.D ? 'D' : '-',
	    epkt.rc_descr.R ? 'R' : '-', epkt.rc_descr.H ? 'H' : '-',
	    epkt.rc_descr.C ? 'C' : '-', epkt.rc_descr.I ? 'I' : '-',
	    epkt.rc_descr.B ? 'B' : '-', epkt.rc_descr.STOP ? "STOP" : "");

	/*
	 * If the low byte of 'reserved' is non-zero, it holds the device
	 * and function number of the port.
	 */
	if (epkt.reserved & 0xff)
		rppath = fab_find_rppath_by_df(hdl, nvl, epkt.reserved & 0xff);

	if (epkt.rc_descr.H) {
		data.pcie_ue_hdr[0] = (uint32_t)(epkt.hdr[0] >> 32);
		data.pcie_ue_hdr[1] = (uint32_t)epkt.hdr[0];
		data.pcie_ue_hdr[2] = (uint32_t)(epkt.hdr[1] >> 32);
		data.pcie_ue_hdr[3] = (uint32_t)(epkt.hdr[1]);

		tlp_hdr = (pcie_tlp_hdr_t *)&data.pcie_ue_hdr[0];
		ptr = &data.pcie_ue_hdr[1];
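		/*
		 * Recover the target of the faulted transaction from the
		 * logged TLP header: memory/IO requests carry the requester
		 * ID and address, config requests carry bus/dev/func, and
		 * completions carry the requester ID only.
		 */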
		switch (tlp_hdr->type) {
		case PCIE_TLP_TYPE_IO:
		case PCIE_TLP_TYPE_MEM:
		case PCIE_TLP_TYPE_MEMLK:
		{
			pcie_mem64_t *pmp = ptr;
			data.pcie_ue_tgt_trans = PF_ADDR_PIO;
			data.pcie_ue_tgt_bdf = pmp->rid;
			if (tlp_hdr->fmt & 0x1)
				data.pcie_ue_tgt_addr =
				    ((uint64_t)pmp->addr1 << 32) | pmp->addr0;
			else
				data.pcie_ue_tgt_addr =
				    ((pcie_memio32_t *)ptr)->addr0;

			break;
		}

		case PCIE_TLP_TYPE_CFG0:
		case PCIE_TLP_TYPE_CFG1:
		{
			pcie_cfg_t *pcp = ptr;

			data.pcie_ue_tgt_trans = PF_ADDR_CFG;
			data.pcie_ue_tgt_bdf =
			    (pcp->bus << 8) | (pcp->dev << 3) | pcp->func;
			break;
		}

		case PCIE_TLP_TYPE_CPL:
		case PCIE_TLP_TYPE_CPLLK:
			data.pcie_ue_tgt_bdf = ((pcie_cpl_t *)ptr)->rid;
			break;
		}

		fmd_hdl_debug(hdl, "HEADER 0 0x%x", data.pcie_ue_hdr[0]);
		fmd_hdl_debug(hdl, "HEADER 1 0x%x", data.pcie_ue_hdr[1]);
		fmd_hdl_debug(hdl, "HEADER 2 0x%x", data.pcie_ue_hdr[2]);
		fmd_hdl_debug(hdl, "HEADER 3 0x%x", data.pcie_ue_hdr[3]);
		fmd_hdl_debug(hdl, "In header bdf = %#hx addr = %#llx",
		    data.pcie_ue_tgt_bdf,
		    (uint64_t)data.pcie_ue_tgt_addr);

		/* find the root port to which this error is related */
		if (rppath == NULL && data.pcie_ue_tgt_bdf)
			rppath = fab_find_rppath_by_devbdf(hdl, nvl,
			    data.pcie_ue_tgt_bdf);
	}

	/* find the root port by address */
	if (rppath == NULL && epkt.rc_descr.M != 0) {
		devpath = fab_find_addr(hdl, nvl, epkt.addr);
		if (devpath) {
			rppath = fab_find_rppath_by_devpath(hdl, devpath);
			fmd_hdl_strfree(hdl, devpath);
		}
	}

	/*
	 * reset the detector in the original ereport to the root port
	 */
	if (rppath) {
		if (nvlist_alloc(&detector, NV_UNIQUE_NAME, 0) != 0) {
			fmd_hdl_error(hdl, "failed to allocate nvlist");
			fmd_hdl_strfree(hdl, rppath);
			return;
		}
		(void) nvlist_add_string(detector, FM_VERSION,
		    FM_DEV_SCHEME_VERSION);
		(void) nvlist_add_string(detector, FM_FMRI_SCHEME,
		    FM_FMRI_SCHEME_DEV);
		(void) nvlist_add_string(detector, FM_FMRI_DEV_PATH, rppath);
		(void) nvlist_remove_all(nvl, FM_EREPORT_DETECTOR);
		(void) nvlist_add_nvlist(nvl, FM_EREPORT_DETECTOR, detector);
		nvlist_free(detector);
		fmd_hdl_strfree(hdl, rppath);
	} else {
		/*
		 * We cannot locate the root port the error originated from.
		 * Likely this is because the original ereport is malformed or
		 * the hw error register has corrupted contents.  In this case,
		 * the best we can do is send ereports on all root ports.
		 *
		 * Set pcie_rp_send_all for fab_send_erpt() to process later.
		 */
		fmd_hdl_debug(hdl, "RP not found. Will translate on all RPs.\n");
		data.pcie_rp_send_all = B_TRUE;
	}

	(void) fab_xlate_epkt(hdl, &data, &epkt);
	fab_xlate_pcie_erpts(hdl, &data);
}