/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Stub routines used to link in files from $SRC/common/mc
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/varargs.h>
#include <sys/fm/util.h>
#include <sys/fm/cpu/AMD.h>
#include <sys/fm/smb/fmsmb.h>
#include <sys/fm/protocol.h>
#include <sys/mc.h>
#include <sys/smbios.h>
#include <sys/smbios_impl.h>

#include <mcamd.h>
#include <mcamd_off.h>

int mcamd_debug = 0; /* see mcamd_api.h for MCAMD_DBG_* values */

extern int x86gentopo_legacy;

struct mc_offmap {
	int mcom_code;
	uint_t mcom_offset;
};

static uint_t
nodetype(mcamd_node_t *node)
{
	mc_hdr_t *mch = (mc_hdr_t *)node;
	return (mch->mch_type);
}

static void *
node2type(mcamd_node_t *node, int type)
{
	mc_hdr_t *mch = (mc_hdr_t *)node;
	ASSERT(mch->mch_type == type);
	return (mch);
}

/*
 * Iterate over all memory controllers.
 */
/*ARGSUSED*/
mcamd_node_t *
mcamd_mc_next(mcamd_hdl_t *hdl, mcamd_node_t *root, mcamd_node_t *last)
{
	mc_t *mc;

	ASSERT(RW_LOCK_HELD(&mc_lock));

	if (last == NULL)
		return ((mcamd_node_t *)mc_list);

	mc = node2type(last, MC_NT_MC);

	return ((mcamd_node_t *)mc->mc_next);
}
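
/*
 * Usage sketch (illustrative only): a caller that already holds mc_lock
 * as a reader can walk every memory controller with
 *
 *	for (node = mcamd_mc_next(hdl, root, NULL); node != NULL;
 *	    node = mcamd_mc_next(hdl, root, node)) {
 *		...
 *	}
 *
 * The same pattern applies to mcamd_cs_next and mcamd_dimm_next below,
 * with the parent MC, DIMM or chip-select passed as the second argument.
 */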

/*
 * Iterate over all chip-selects of an MC or all chip-selects of a DIMM,
 * depending on the node type of 'node'.  In the DIMM case we do not
 * have a linked list of associated chip-selects but an array of pointers
 * to them.
 */
/*ARGSUSED*/
mcamd_node_t *
mcamd_cs_next(mcamd_hdl_t *hdl, mcamd_node_t *node, mcamd_node_t *last)
{
	uint_t nt = nodetype(node);
	mc_t *mc;
	mc_cs_t *mccs;
	mc_dimm_t *mcd;
	int i;
	void *retval;

	ASSERT(nt == MC_NT_MC || nt == MC_NT_DIMM);

	if (last == NULL) {
		switch (nt) {
		case MC_NT_MC:
			mc = node2type(node, MC_NT_MC);
			retval = mc->mc_cslist;
			break;
		case MC_NT_DIMM:
			mcd = node2type(node, MC_NT_DIMM);
			retval = mcd->mcd_cs[0];
			break;
		}
	} else {
		mccs = node2type(last, MC_NT_CS);

		switch (nt) {
		case MC_NT_MC:
			retval = mccs->mccs_next;
			break;
		case MC_NT_DIMM:
			mcd = node2type(node, MC_NT_DIMM);
			for (i = 0; i < MC_CHIP_DIMMRANKMAX; i++) {
				if (mcd->mcd_cs[i] == mccs)
					break;
			}
			if (i == MC_CHIP_DIMMRANKMAX)
				cmn_err(CE_PANIC, "Bad last value for "
				    "mcamd_cs_next");

			if (i == MC_CHIP_DIMMRANKMAX - 1)
				retval = NULL;
			else
				retval = mcd->mcd_cs[i + 1];
			break;
		}
	}

	return ((mcamd_node_t *)retval);
}
147
148/*
149 * Iterate over all DIMMs of an MC or all DIMMs of a chip-select depending
150 * on the node type of 'node'.  In the chip-select case we do not have
151 * a linked list of associated DIMMs but an array of pointers to them.
152 */
153/*ARGSUSED*/
154mcamd_node_t *
155mcamd_dimm_next(mcamd_hdl_t *hdl, mcamd_node_t *node, mcamd_node_t *last)
156{
157	uint_t nt = nodetype(node);
158	mc_t *mc;
159	mc_cs_t *mccs;
160	mc_dimm_t *mcd;
161	int i;
162	void *retval;
163
164	ASSERT(nt == MC_NT_MC || nt == MC_NT_CS);
165
166	if (last == NULL) {
167		switch (nt) {
168		case MC_NT_MC:
169			mc = node2type(node, MC_NT_MC);
170			retval =  mc->mc_dimmlist;
171			break;
172		case MC_NT_CS:
173			mccs = node2type(node, MC_NT_CS);
174			retval = mccs->mccs_dimm[0];
175			break;
176		}
177	} else {
178		mcd = node2type(last, MC_NT_DIMM);
179
180		switch (nt) {
181		case MC_NT_MC:
182			retval = mcd->mcd_next;
183			break;
184		case MC_NT_CS:
185			mccs = node2type(node, MC_NT_CS);
186			for (i = 0; i < MC_CHIP_DIMMPERCS; i++) {
187				if (mccs->mccs_dimm[i] == mcd)
188					break;
189			}
190			if (i == MC_CHIP_DIMMPERCS)
191				cmn_err(CE_PANIC, "Bad last value for "
192				    "mcamd_dimm_next");
193
194			if (i == MC_CHIP_DIMMPERCS - 1)
195				retval = NULL;
196			else
197				retval = mccs->mccs_dimm[i + 1];
198			break;
199		}
200	}
201
202	return ((mcamd_node_t *)retval);
203}

/*ARGSUSED*/
mcamd_node_t *
mcamd_cs_mc(mcamd_hdl_t *hdl, mcamd_node_t *csnode)
{
	mc_cs_t *mccs = node2type(csnode, MC_NT_CS);
	return ((mcamd_node_t *)mccs->mccs_mc);
}

/*ARGSUSED*/
mcamd_node_t *
mcamd_dimm_mc(mcamd_hdl_t *hdl, mcamd_node_t *dnode)
{
	mc_dimm_t *mcd = node2type(dnode, MC_NT_DIMM);
	return ((mcamd_node_t *)mcd->mcd_mc);
}

/*
 * Node properties.  A property is accessed through a property number code;
 * we search these tables for a match (choosing the table by node type) and
 * return the uint64_t property at the indicated offset into the node
 * structure.  All properties must be of type uint64_t.  It is assumed that
 * property lookup does not have to be super-fast - we search linearly
 * down the (small) lists.
 */
static const struct mc_offmap mcamd_mc_offmap[] = {
	{ MCAMD_PROP_NUM, MCAMD_MC_OFF_NUM },
	{ MCAMD_PROP_REV, MCAMD_MC_OFF_REV },
	{ MCAMD_PROP_BASE_ADDR, MCAMD_MC_OFF_BASE_ADDR },
	{ MCAMD_PROP_LIM_ADDR, MCAMD_MC_OFF_LIM_ADDR },
	{ MCAMD_PROP_ILEN, MCAMD_MC_OFF_ILEN },
	{ MCAMD_PROP_ILSEL, MCAMD_MC_OFF_ILSEL },
	{ MCAMD_PROP_CSINTLVFCTR, MCAMD_MC_OFF_CSINTLVFCTR },
	{ MCAMD_PROP_DRAMHOLE_SIZE, MCAMD_MC_OFF_DRAMHOLE_SIZE },
	{ MCAMD_PROP_ACCESS_WIDTH, MCAMD_MC_OFF_ACCWIDTH },
	{ MCAMD_PROP_CSBANKMAPREG, MCAMD_MC_OFF_CSBANKMAPREG },
	{ MCAMD_PROP_BANKSWZL, MCAMD_MC_OFF_BNKSWZL },
	{ MCAMD_PROP_MOD64MUX, MCAMD_MC_OFF_MOD64MUX },
	{ MCAMD_PROP_SPARECS, MCAMD_MC_OFF_SPARECS },
	{ MCAMD_PROP_BADCS, MCAMD_MC_OFF_BADCS },
};

static const struct mc_offmap mcamd_cs_offmap[] = {
	{ MCAMD_PROP_NUM, MCAMD_CS_OFF_NUM },
	{ MCAMD_PROP_BASE_ADDR, MCAMD_CS_OFF_BASE_ADDR },
	{ MCAMD_PROP_MASK, MCAMD_CS_OFF_MASK },
	{ MCAMD_PROP_SIZE, MCAMD_CS_OFF_SIZE },
	{ MCAMD_PROP_CSBE, MCAMD_CS_OFF_CSBE },
	{ MCAMD_PROP_SPARE, MCAMD_CS_OFF_SPARE },
	{ MCAMD_PROP_TESTFAIL, MCAMD_CS_OFF_TESTFAIL },
	{ MCAMD_PROP_CSDIMM1, MCAMD_CS_OFF_DIMMNUMS },
	{ MCAMD_PROP_CSDIMM2, MCAMD_CS_OFF_DIMMNUMS +
	    MCAMD_CS_OFF_DIMMNUMS_INCR },
	{ MCAMD_PROP_DIMMRANK, MCAMD_CS_OFF_DIMMRANK },
};

static const struct mc_offmap mcamd_dimm_offmap[] = {
	{ MCAMD_PROP_NUM, MCAMD_DIMM_OFF_NUM },
	{ MCAMD_PROP_SIZE, MCAMD_DIMM_OFF_SIZE },
};

struct nt_offmap {
	const struct mc_offmap *omp;
	int mapents;
};

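/*
 * Look up the structure offset recorded for property/register code 'code'
 * in the offset map for this node's type.  Returns 1 and fills *offset on
 * a match, or 0 if the node type has no map or the code is not found.
 */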
/*ARGSUSED*/
static int
findoffset(mcamd_hdl_t *hdl, mcamd_node_t *node, struct nt_offmap *arr,
    int code, uint_t *offset)
{
	int i;
	mc_hdr_t *mch = (mc_hdr_t *)node;
	int nt = mch->mch_type;
	const struct mc_offmap *omp;

	if (nt > MC_NT_NTYPES || (omp = arr[nt].omp) == NULL)
		return (0);

	for (i = 0; i < arr[nt].mapents; i++, omp++) {
		if (omp->mcom_code == code) {
			*offset = omp->mcom_offset;
			return (1);
		}
	}

	return (0);
}

/*ARGSUSED*/
int
mcamd_get_numprop(mcamd_hdl_t *hdl, mcamd_node_t *node,
    mcamd_propcode_t code, mcamd_prop_t *valp)
{
	int found;
	uint_t offset;

	struct nt_offmap props[] = {
		{ mcamd_mc_offmap,	/* MC_NT_MC */
		    sizeof (mcamd_mc_offmap) / sizeof (struct mc_offmap) },
		{ mcamd_cs_offmap,	/* MC_NT_CS */
		    sizeof (mcamd_cs_offmap) / sizeof (struct mc_offmap) },
		{ mcamd_dimm_offmap,	/* MC_NT_DIMM */
		    sizeof (mcamd_dimm_offmap) / sizeof (struct mc_offmap) }
	};

	found = findoffset(hdl, node, &props[0], code, &offset);
	ASSERT(found);

	if (found)
		*valp = *(uint64_t *)((uintptr_t)node + offset);

	return (found == 1);
}

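/*
 * Retrieve several numeric properties in one call.  The variable argument
 * list is a sequence of (node, property code, value pointer) triples
 * terminated by a NULL node, e.g. (sketch; variable names illustrative):
 *
 *	if (!mcamd_get_numprops(hdl,
 *	    (mcamd_node_t *)mc, MCAMD_PROP_NUM, &mcnum,
 *	    (mcamd_node_t *)cs, MCAMD_PROP_BASE_ADDR, &csbase,
 *	    NULL))
 *		... lookup failed ...
 *
 * Returns 1 if every lookup succeeded, 0 on the first failure.
 */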
int
mcamd_get_numprops(mcamd_hdl_t *hdl, ...)
{
	va_list ap;
	mcamd_node_t *node;
	mcamd_propcode_t code;
	mcamd_prop_t *valp;

	va_start(ap, hdl);
	while ((node = va_arg(ap, mcamd_node_t *)) != NULL) {
		code = va_arg(ap, mcamd_propcode_t);
		valp = va_arg(ap, mcamd_prop_t *);
		if (!mcamd_get_numprop(hdl, node, code, valp))
			return (0);
	}
	va_end(ap);
	return (1);
}

static const struct mc_offmap mcreg_offmap[] = {
	{ MCAMD_REG_DRAMBASE, MCAMD_MC_OFF_DRAMBASE_REG },
	{ MCAMD_REG_DRAMLIMIT, MCAMD_MC_OFF_DRAMLIMIT_REG },
	{ MCAMD_REG_DRAMHOLE, MCAMD_MC_OFF_DRAMHOLE_REG },
	{ MCAMD_REG_DRAMCFGLO, MCAMD_MC_OFF_DRAMCFGLO_REG },
	{ MCAMD_REG_DRAMCFGHI, MCAMD_MC_OFF_DRAMCFGHI_REG },
};

static const struct mc_offmap csreg_offmap[] = {
	{ MCAMD_REG_CSBASE, MCAMD_CS_OFF_CSBASE_REG },
	{ MCAMD_REG_CSMASK, MCAMD_CS_OFF_CSMASK_REG },
};

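/*
 * Retrieve a raw configuration register image.  Register images are stored
 * in the node structures as uint32_t values, unlike the uint64_t numeric
 * properties above, and only MC and chip-select nodes have register maps.
 */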
/*ARGSUSED*/
int
mcamd_get_cfgreg(struct mcamd_hdl *hdl, mcamd_node_t *node,
    mcamd_regcode_t code, uint32_t *valp)
{
	int found;
	uint_t offset;

	struct nt_offmap regs[] = {
		{ mcreg_offmap,	/* MC_NT_MC */
		    sizeof (mcreg_offmap) / sizeof (struct mc_offmap) },
		{ csreg_offmap,	/* MC_NT_CS */
		    sizeof (csreg_offmap) / sizeof (struct mc_offmap) },
		{ NULL, 0 }		/* MC_NT_DIMM */
	};

	found = findoffset(hdl, node, &regs[0], code, &offset);
	ASSERT(found);

	if (found)
		*valp = *(uint32_t *)((uintptr_t)node + offset);

	return (found == 1);
}

int
mcamd_get_cfgregs(mcamd_hdl_t *hdl, ...)
{
	va_list ap;
	mcamd_node_t *node;
	mcamd_regcode_t code;
	uint32_t *valp;

	va_start(ap, hdl);
	while ((node = va_arg(ap, mcamd_node_t *)) != NULL) {
		code = va_arg(ap, mcamd_regcode_t);
		valp = va_arg(ap, uint32_t *);
		if (!mcamd_get_cfgreg(hdl, node, code, valp))
			return (0);
	}
	va_end(ap);
	return (1);
}

int
mcamd_errno(mcamd_hdl_t *mcamd)
{
	return (mcamd->mcamd_errno);
}

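/*
 * Record an error code on the handle.  Always returns -1 so that callers
 * can fail with 'return (mcamd_set_errno(hdl, err))'.
 */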
int
mcamd_set_errno(mcamd_hdl_t *mcamd, int err)
{
	mcamd->mcamd_errno = err;
	return (-1);
}

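/*
 * Debug output, gated on the handle's debug mask; MCAMD_DBG_ERR messages
 * are logged via cmn_err as warnings, all others as notices.
 */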
void
mcamd_dprintf(mcamd_hdl_t *mcamd, int mask, const char *fmt, ...)
{
	va_list ap;

	if (!(mcamd->mcamd_debug & mask))
		return;

	va_start(ap, fmt);
	vcmn_err(mask & MCAMD_DBG_ERR ? CE_WARN : CE_NOTE, fmt, ap);
	va_end(ap);
}

void
mcamd_mkhdl(mcamd_hdl_t *hdl)
{
	hdl->mcamd_errno = 0;
	hdl->mcamd_debug = mcamd_debug;
}

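/*
 * Translate the result of a common mcamd routine into a cmi_errno_t for
 * return through the cmi_mc_ops interface.
 */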
cmi_errno_t
mcamd_cmierr(int err, mcamd_hdl_t *hdl)
{
	if (err == 0)
		return (CMI_SUCCESS);

	switch (mcamd_errno(hdl)) {
	case EMCAMD_SYNDINVALID:
		return (CMIERR_MC_SYNDROME);

	case EMCAMD_TREEINVALID:
		return (CMIERR_MC_BADSTATE);

	case EMCAMD_NOADDR:
		return (CMIERR_MC_NOADDR);

	case EMCAMD_INSUFF_RES:
		return (CMIERR_MC_ADDRBITS);

	default:
		return (CMIERR_UNKNOWN);
	}
}

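/*
 * cmi_mc_patounum entry point (see mcamd_mc_ops below): translate a
 * physical address and syndrome to a unum using the common mcamd code,
 * holding mc_lock as reader across the walk of the MC tree.
 */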
/*ARGSUSED*/
cmi_errno_t
mcamd_patounum_wrap(void *arg, uint64_t pa, uint8_t valid_hi, uint8_t valid_lo,
    uint32_t synd, int syndtype, mc_unum_t *unump)
{
	mcamd_hdl_t mcamd;
	int rc;

	mcamd_mkhdl(&mcamd);

	rw_enter(&mc_lock, RW_READER);

	rc = mcamd_patounum(&mcamd, (mcamd_node_t *)mc_list, pa,
	    valid_hi, valid_lo, synd, syndtype, unump);

#ifdef DEBUG
	/*
	 * Apply the reverse operation to verify the result.  If there is
	 * a problem complain but continue.
	 */
	if (rc == 0 && MCAMD_RC_OFFSET_VALID(unump->unum_offset)) {
		uint64_t rpa;
		if (mcamd_unumtopa(&mcamd, (mcamd_node_t *)mc_list, unump,
		    &rpa) != 0 || rpa != pa) {
			mcamd_dprintf(&mcamd, MCAMD_DBG_ERR,
			    "mcamd_patounum_wrap: offset calculation "
			    "verification for PA 0x%llx failed\n", pa);
		}
	}
#endif
	rw_exit(&mc_lock);

	return (mcamd_cmierr(rc, &mcamd));
}

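/*
 * Reconstruct a mc_unum_t from an hc-scheme FMRI: the offset is taken from
 * the hc-specific member and the motherboard, chip, memory-controller,
 * chip-select, dimm and rank instance numbers are taken from the hc-list
 * name/id pairs.  Returns 1 on success, 0 if a required member is missing.
 */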
static int
fmri2unum(nvlist_t *nvl, mc_unum_t *unump)
{
	int i;
	uint64_t offset;
	nvlist_t *hcsp, **hcl;
	uint_t npr;

	if (nvlist_lookup_nvlist(nvl, FM_FMRI_HC_SPECIFIC, &hcsp) != 0 ||
	    (nvlist_lookup_uint64(hcsp, "asru-" FM_FMRI_HC_SPECIFIC_OFFSET,
	    &offset) != 0 && nvlist_lookup_uint64(hcsp,
	    FM_FMRI_HC_SPECIFIC_OFFSET, &offset) != 0) ||
	    nvlist_lookup_nvlist_array(nvl, FM_FMRI_HC_LIST, &hcl, &npr) != 0)
		return (0);

	bzero(unump, sizeof (mc_unum_t));
	unump->unum_chan = MC_INVALNUM;
	for (i = 0; i < MC_UNUM_NDIMM; i++)
		unump->unum_dimms[i] = MC_INVALNUM;

	for (i = 0; i < npr; i++) {
		char *hcnm, *hcid;
		long v;

		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, &hcnm) != 0 ||
		    nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0 ||
		    ddi_strtol(hcid, NULL, 0, &v) != 0)
			return (0);

		if (strcmp(hcnm, "motherboard") == 0)
			unump->unum_board = (int)v;
		else if (strcmp(hcnm, "chip") == 0)
			unump->unum_chip = (int)v;
		else if (strcmp(hcnm, "memory-controller") == 0)
			unump->unum_mc = (int)v;
		else if (strcmp(hcnm, "chip-select") == 0)
			unump->unum_cs = (int)v;
		else if (strcmp(hcnm, "dimm") == 0)
			unump->unum_dimms[0] = (int)v;
		else if (strcmp(hcnm, "rank") == 0)
			unump->unum_rank = (int)v;
	}

	unump->unum_offset = offset;

	return (1);
}

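/*
 * cmi_mc_unumtopa entry point: callers supply either a unum or an FMRI
 * nvlist, never both; an FMRI is first converted with fmri2unum() and the
 * result is passed to the common mcamd_unumtopa() under mc_lock.
 */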
/*ARGSUSED*/
cmi_errno_t
mcamd_unumtopa_wrap(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
{
	mcamd_hdl_t mcamd;
	int rc;
	mc_unum_t unum;

	ASSERT(unump == NULL || nvl == NULL);	/* enforced at cmi level */

	if (unump == NULL) {
		if (!fmri2unum(nvl, &unum))
			return (CMIERR_MC_INVALUNUM);
		unump = &unum;
	}

	mcamd_mkhdl(&mcamd);

	rw_enter(&mc_lock, RW_READER);
	rc = mcamd_unumtopa(&mcamd, (mcamd_node_t *)mc_list, unump, pap);
	rw_exit(&mc_lock);

	return (mcamd_cmierr(rc, &mcamd));
}

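/*
 * Build one 'resource' nvlist element per valid dimm number in the unum,
 * using the SMBIOS-derived board path when the generic topology is in use
 * and the legacy motherboard/chip path otherwise.
 */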
static void
mc_ereport_dimm_resource(mc_unum_t *unump, nvlist_t *elems[], int *nump,
    mc_t *mc)
{
	int i;

	for (i = 0; i < MC_UNUM_NDIMM; i++) {
		if (unump->unum_dimms[i] == MC_INVALNUM)
			break;

		elems[(*nump)++] = fm_nvlist_create(NULL);

		if (!x86gentopo_legacy && mc->smb_bboard != NULL) {
			fm_fmri_hc_create(elems[i], FM_HC_SCHEME_VERSION,
			    NULL, NULL, mc->smb_bboard, 4,
			    "chip", mc->smb_chipid,
			    "memory-controller", unump->unum_mc,
			    "dimm", unump->unum_dimms[i],
			    "rank", unump->unum_rank);
		} else {
			fm_fmri_hc_set(elems[i], FM_HC_SCHEME_VERSION,
			    NULL, NULL, 5,
			    "motherboard", unump->unum_board,
			    "chip", unump->unum_chip,
			    "memory-controller", unump->unum_mc,
			    "dimm", unump->unum_dimms[i],
			    "rank", unump->unum_rank);
		}
	}
}

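/*
 * Build a single 'resource' element identifying the chip-select topology
 * node; used when the unum carries no valid dimm numbers.
 */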
static void
mc_ereport_cs_resource(mc_unum_t *unump, nvlist_t *elems[], int *nump, mc_t *mc)
{
	elems[0] = fm_nvlist_create(NULL);

	if (!x86gentopo_legacy && mc->smb_bboard != NULL) {
		fm_fmri_hc_create(elems[0], FM_HC_SCHEME_VERSION, NULL, NULL,
		    mc->smb_bboard, 3,
		    "chip", mc->smb_chipid,
		    "memory-controller", unump->unum_mc,
		    "chip-select", unump->unum_cs);
	} else {
		fm_fmri_hc_set(elems[0], FM_HC_SCHEME_VERSION, NULL, NULL, 4,
		    "motherboard", unump->unum_board,
		    "chip", unump->unum_chip,
		    "memory-controller", unump->unum_mc,
		    "chip-select", unump->unum_cs);
	}
	*nump = 1;
}

/*
 * Create the 'resource' payload member from the unum info.  If valid
 * dimm numbers are present in the unum info then create members
 * identifying the dimm and rank; otherwise if a valid chip-select
 * number is indicated then create a member identifying the chip-select
 * topology node.
 */
static void
mc_ereport_add_resource(nvlist_t *payload, mc_unum_t *unump, mc_t *mc)
{
	nvlist_t *elems[MC_UNUM_NDIMM];
	int nelems = 0;
	int i;

	if (unump->unum_dimms[0] != MC_INVALNUM)
		mc_ereport_dimm_resource(unump, elems, &nelems, mc);
	else if (unump->unum_cs != MC_INVALNUM)
		mc_ereport_cs_resource(unump, elems, &nelems, mc);

	if (nelems > 0) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
		    DATA_TYPE_NVLIST_ARRAY, nelems, elems, NULL);

		for (i = 0; i < nelems; i++)
			fm_nvlist_destroy(elems[i], FM_NVA_FREE);
	}
}

static void
mc_ereport_add_payload(nvlist_t *ereport, uint64_t members, mc_unum_t *unump,
    mc_t *mc)
{
	if (members & FM_EREPORT_PAYLOAD_FLAG_RESOURCE &&
	    unump != NULL)
		mc_ereport_add_resource(ereport, unump, mc);
}

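/*
 * Build the detector FMRI for this memory controller, again preferring the
 * SMBIOS-derived board path when the generic topology is in use.
 */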
static nvlist_t *
mc_fmri_create(mc_t *mc)
{
	nvlist_t *nvl = fm_nvlist_create(NULL);

	if (!x86gentopo_legacy && mc->smb_bboard != NULL) {
		fm_fmri_hc_create(nvl, FM_HC_SCHEME_VERSION, NULL, NULL,
		    mc->smb_bboard, 2,
		    "chip", mc->smb_chipid,
		    "memory-controller", 0);
	} else {
		fm_fmri_hc_set(nvl, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "chip", mc->mc_props.mcp_num,
		    "memory-controller", 0);
	}

	return (nvl);
}

/*
 * Simple ereport generator for errors detected by the memory controller.
 * Posts an ereport of class ereport.cpu.amd.<class_sfx> with a resource
 * nvlist derived from the given mc_unum_t.  There are no other payload
 * members.  The mc argument is used to formulate a detector and it should
 * correspond with the mc identified in the mc_unum_t.
 *
 * There is no control over which members to include in the resulting
 * ereport - it will be an ereport formed using the given class suffix, a
 * detector indicating the memory controller, and a resource generated by
 * expanding the given mc_unum_t.
 *
 * We do not use any special nv allocator here and so this is not suitable
 * for use during panic.  It is intended for use during MC topology
 * discovery and other controlled circumstances.
 */
void
mcamd_ereport_post(mc_t *mc, const char *class_sfx, mc_unum_t *unump,
    uint64_t payload)
{
	nvlist_t *ereport, *detector;
	char buf[FM_MAX_CLASS];

	ereport = fm_nvlist_create(NULL);
	detector = mc_fmri_create(mc);

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s", FM_ERROR_CPU,
	    "amd", class_sfx);
	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
	    fm_ena_generate(gethrtime(), FM_ENA_FMT1), detector, NULL);
	fm_nvlist_destroy(detector, FM_NVA_FREE);

	mc_ereport_add_payload(ereport, payload, unump, mc);

	(void) fm_ereport_post(ereport, EVCH_TRYHARD);
	fm_nvlist_destroy(ereport, FM_NVA_FREE);
}
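
/*
 * Usage sketch (illustrative only; class_sfx and unum are whatever the
 * caller has in hand): to have the resource member included, set the
 * FM_EREPORT_PAYLOAD_FLAG_RESOURCE bit in the payload argument, e.g.
 *
 *	mcamd_ereport_post(mc, class_sfx, &unum,
 *	    FM_EREPORT_PAYLOAD_FLAG_RESOURCE);
 */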

static const cmi_mc_ops_t mcamd_mc_ops = {
	mcamd_patounum_wrap,	/* cmi_mc_patounum */
	mcamd_unumtopa_wrap,	/* cmi_mc_unumtopa */
	NULL			/* cmi_mc_logout */
};

void
mcamd_mc_register(cmi_hdl_t hdl, mc_t *mc)
{
	cmi_mc_register(hdl, &mcamd_mc_ops, mc);
}