/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>

#include <sys/fm/protocol.h>
#include <sys/fm/smb/fmsmb.h>
#include <sys/devfm.h>

#include <sys/cpu_module.h>

#define	ANY_ID		(uint_t)-1

/*
 * INIT_HDLS is the initial size of the cmi_hdl_t array.  We fill the array
 * during cmi_hdl_walk; if it overflows, we reallocate a new array twice
 * the size of the old one.
 */
#define	INIT_HDLS	16

typedef struct fm_cmi_walk_t
{
	uint_t	chipid;		/* chipid to match during walk */
	uint_t	coreid;		/* coreid to match */
	uint_t	strandid;	/* strandid to match */
	int	(*cbfunc)(cmi_hdl_t, void *, void *);	/* callback function */
	cmi_hdl_t *hdls;	/* allocated array to save the handles */
	uint_t	nhdl_max;	/* allocated array size */
	uint_t	nhdl;		/* handles saved */
} fm_cmi_walk_t;

extern int x86gentopo_legacy;

int
fm_get_paddr(nvlist_t *nvl, uint64_t *paddr)
{
	uint8_t version;
	uint64_t pa;
	char *scheme;
	int err;

	/* Verify FMRI scheme name and version number */
	if ((nvlist_lookup_string(nvl, FM_FMRI_SCHEME, &scheme) != 0) ||
	    (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0) ||
	    (nvlist_lookup_uint8(nvl, FM_VERSION, &version) != 0) ||
	    version > FM_HC_SCHEME_VERSION) {
		return (EINVAL);
	}

	if ((err = cmi_mc_unumtopa(NULL, nvl, &pa)) != CMI_SUCCESS &&
	    err != CMIERR_MC_PARTIALUNUMTOPA)
		return (EINVAL);

	*paddr = pa;
	return (0);
}
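
/*
 * Illustrative sketch (not part of this file): a caller hands
 * fm_get_paddr() an hc-scheme FMRI nvlist and receives the decoded
 * physical address, e.g.
 *
 *	uint64_t pa;
 *
 *	if (fm_get_paddr(fmri, &pa) == 0) {
 *		// pa holds the (possibly partial) physical address
 *		// resolved from the memory-controller unum in fmri
 *	}
 *
 * The nvlist must carry FM_FMRI_SCHEME == FM_FMRI_SCHEME_HC and an
 * FM_VERSION no greater than FM_HC_SCHEME_VERSION, or EINVAL results.
 */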

/*
 * Routines for walking the cmi handles.
 */

static void
walk_init(fm_cmi_walk_t *wp, uint_t chipid, uint_t coreid, uint_t strandid,
    int (*cbfunc)(cmi_hdl_t, void *, void *))
{
	wp->chipid = chipid;
	wp->coreid = coreid;
	wp->strandid = strandid;
	/*
	 * If no callback is set, we allocate an array to save the
	 * cmi handles.
	 */
	if ((wp->cbfunc = cbfunc) == NULL) {
		wp->hdls = kmem_alloc(sizeof (cmi_hdl_t) * INIT_HDLS, KM_SLEEP);
		wp->nhdl_max = INIT_HDLS;
		wp->nhdl = 0;
	}
}

static void
walk_fini(fm_cmi_walk_t *wp)
{
	if (wp->cbfunc == NULL)
		kmem_free(wp->hdls, sizeof (cmi_hdl_t) * wp->nhdl_max);
}

static int
select_cmi_hdl(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	fm_cmi_walk_t *wp = (fm_cmi_walk_t *)arg1;

	if (wp->chipid != ANY_ID && wp->chipid != cmi_hdl_chipid(hdl))
		return (CMI_HDL_WALK_NEXT);
	if (wp->coreid != ANY_ID && wp->coreid != cmi_hdl_coreid(hdl))
		return (CMI_HDL_WALK_NEXT);
	if (wp->strandid != ANY_ID && wp->strandid != cmi_hdl_strandid(hdl))
		return (CMI_HDL_WALK_NEXT);

	/*
	 * Call the callback function if one exists; otherwise take a hold
	 * on the handle and append it to the preallocated array.  If the
	 * array is about to overflow, reallocate a bigger one to replace
	 * it.
	 */
	if (wp->cbfunc != NULL)
		return (wp->cbfunc(hdl, arg2, arg3));

	if (wp->nhdl == wp->nhdl_max) {
		size_t sz = sizeof (cmi_hdl_t) * wp->nhdl_max;
		cmi_hdl_t *newarray = kmem_alloc(sz << 1, KM_SLEEP);

		bcopy(wp->hdls, newarray, sz);
		kmem_free(wp->hdls, sz);
		wp->hdls = newarray;
		wp->nhdl_max <<= 1;
	}

	cmi_hdl_hold(hdl);
	wp->hdls[wp->nhdl++] = hdl;

	return (CMI_HDL_WALK_NEXT);
}
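
/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * when walk_init() is handed a callback, select_cmi_hdl() dispatches
 * each matching handle to it instead of collecting holds, e.g.
 *
 *	static int
 *	my_cb(cmi_hdl_t hdl, void *arg2, void *arg3)
 *	{
 *		// inspect hdl; arg2/arg3 are passed through cmi_hdl_walk
 *		return (CMI_HDL_WALK_NEXT);
 *	}
 *
 *	fm_cmi_walk_t wk;
 *
 *	walk_init(&wk, chipid, ANY_ID, ANY_ID, my_cb);
 *	cmi_hdl_walk(select_cmi_hdl, &wk, arg2, arg3);
 *	walk_fini(&wk);		// frees nothing in callback mode
 */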

static void
populate_cpu(nvlist_t **nvlp, cmi_hdl_t hdl)
{
	uint_t	fm_chipid;
	uint16_t smbios_id;
	const char *idstr;

	(void) nvlist_alloc(nvlp, NV_UNIQUE_NAME, KM_SLEEP);

	/*
	 * If SMBIOS satisfies FMA topology needs, gather more information
	 * on the chip's physical roots, such as
	 * /chassis=x/motherboard=y/cpuboard=z, and set the chip_id to
	 * match the SMBIOS Type 4 ordering; this has to match the
	 * ereport's chip resource instance derived from SMBIOS.  For FM,
	 * Multi-Chip-Module support should set the chipid in terms of the
	 * processor package rather than the die/node within the package.
	 */

	if (!x86gentopo_legacy) {
		smbios_id = cmi_hdl_smbiosid(hdl);
		fm_chipid = cmi_hdl_smb_chipid(hdl);
		(void) nvlist_add_nvlist(*nvlp, FM_PHYSCPU_INFO_CHIP_ROOTS,
		    cmi_hdl_smb_bboard(hdl));
		(void) nvlist_add_uint16(*nvlp, FM_PHYSCPU_INFO_SMBIOS_ID,
		    (uint16_t)smbios_id);
	} else
		fm_chipid = cmi_hdl_chipid(hdl);

	fm_payload_set(*nvlp,
	    FM_PHYSCPU_INFO_VENDOR_ID, DATA_TYPE_STRING,
	    cmi_hdl_vendorstr(hdl),
	    FM_PHYSCPU_INFO_FAMILY, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_family(hdl),
	    FM_PHYSCPU_INFO_MODEL, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_model(hdl),
	    FM_PHYSCPU_INFO_STEPPING, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_stepping(hdl),
	    FM_PHYSCPU_INFO_CHIP_ID, DATA_TYPE_INT32,
	    (int32_t)fm_chipid,
	    FM_PHYSCPU_INFO_NPROCNODES, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_procnodes_per_pkg(hdl),
	    FM_PHYSCPU_INFO_PROCNODE_ID, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_procnodeid(hdl),
	    FM_PHYSCPU_INFO_CORE_ID, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_coreid(hdl),
	    FM_PHYSCPU_INFO_STRAND_ID, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_strandid(hdl),
	    FM_PHYSCPU_INFO_STRAND_APICID, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_strand_apicid(hdl),
	    FM_PHYSCPU_INFO_CHIP_REV, DATA_TYPE_STRING,
	    cmi_hdl_chiprevstr(hdl),
	    FM_PHYSCPU_INFO_SOCKET_TYPE, DATA_TYPE_UINT32,
	    (uint32_t)cmi_hdl_getsockettype(hdl),
	    FM_PHYSCPU_INFO_CPU_ID, DATA_TYPE_INT32,
	    (int32_t)cmi_hdl_logical_id(hdl),
	    NULL);

	/*
	 * Do this separately so that, if there is no ident string, we do
	 * not trigger an error.
	 */
	if ((idstr = cmi_hdl_chipident(hdl)) != NULL) {
		fm_payload_set(*nvlp,
		    FM_PHYSCPU_INFO_CHIP_IDENTSTR, DATA_TYPE_STRING, idstr,
		    NULL);
	}
}

/*ARGSUSED*/
int
fm_ioctl_physcpu_info(int cmd, nvlist_t *invl, nvlist_t **onvlp)
{
	nvlist_t **cpus, *nvl;
	int i, err;
	fm_cmi_walk_t wk;

	/*
	 * Do a walk to save all the cmi handles in the array.
	 */
	walk_init(&wk, ANY_ID, ANY_ID, ANY_ID, NULL);
	cmi_hdl_walk(select_cmi_hdl, &wk, NULL, NULL);

	if (wk.nhdl == 0) {
		walk_fini(&wk);
		return (ENOENT);
	}

	cpus = kmem_alloc(sizeof (nvlist_t *) * wk.nhdl, KM_SLEEP);
	for (i = 0; i < wk.nhdl; i++) {
		populate_cpu(cpus + i, wk.hdls[i]);
		cmi_hdl_rele(wk.hdls[i]);
	}

	walk_fini(&wk);

	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	err = nvlist_add_nvlist_array(nvl, FM_PHYSCPU_INFO_CPUS,
	    cpus, wk.nhdl);

	for (i = 0; i < wk.nhdl; i++)
		nvlist_free(cpus[i]);
	kmem_free(cpus, sizeof (nvlist_t *) * wk.nhdl);

	if (err != 0) {
		nvlist_free(nvl);
		return (err);
	}

	*onvlp = nvl;
	return (0);
}
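
/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * the output nvlist carries one nvlist per CPU under
 * FM_PHYSCPU_INFO_CPUS, which a caller might unpack as
 *
 *	nvlist_t **cpus;
 *	uint_t ncpu;
 *
 *	if (nvlist_lookup_nvlist_array(onvl, FM_PHYSCPU_INFO_CPUS,
 *	    &cpus, &ncpu) == 0) {
 *		for (uint_t i = 0; i < ncpu; i++) {
 *			int32_t chip;
 *
 *			(void) nvlist_lookup_int32(cpus[i],
 *			    FM_PHYSCPU_INFO_CHIP_ID, &chip);
 *		}
 *	}
 */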

int
fm_ioctl_cpu_retire(int cmd, nvlist_t *invl, nvlist_t **onvlp)
{
	int32_t chipid, coreid, strandid;
	int rc, new_status, old_status;
	cmi_hdl_t hdl;
	nvlist_t *nvl;

	switch (cmd) {
	case FM_IOC_CPU_RETIRE:
		new_status = P_FAULTED;
		break;
	case FM_IOC_CPU_STATUS:
		new_status = P_STATUS;
		break;
	case FM_IOC_CPU_UNRETIRE:
		new_status = P_ONLINE;
		break;
	default:
		return (ENOTTY);
	}

	if (nvlist_lookup_int32(invl, FM_CPU_RETIRE_CHIP_ID, &chipid) != 0 ||
	    nvlist_lookup_int32(invl, FM_CPU_RETIRE_CORE_ID, &coreid) != 0 ||
	    nvlist_lookup_int32(invl, FM_CPU_RETIRE_STRAND_ID, &strandid) != 0)
		return (EINVAL);

	hdl = cmi_hdl_lookup(CMI_HDL_NEUTRAL, chipid, coreid, strandid);
	if (hdl == NULL)
		return (EINVAL);

	rc = cmi_hdl_online(hdl, new_status, &old_status);
	cmi_hdl_rele(hdl);

	if (rc == 0) {
		(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
		(void) nvlist_add_int32(nvl, FM_CPU_RETIRE_OLDSTATUS,
		    old_status);
		*onvlp = nvl;
	}

	return (rc);
}
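
/*
 * Illustrative sketch (not part of this file): the input nvlist for the
 * retire/unretire/status ioctls names the target strand by its
 * chip/core/strand triple, e.g. for chip 0, core 1, strand 0:
 *
 *	nvlist_t *invl;
 *
 *	(void) nvlist_alloc(&invl, NV_UNIQUE_NAME, KM_SLEEP);
 *	(void) nvlist_add_int32(invl, FM_CPU_RETIRE_CHIP_ID, 0);
 *	(void) nvlist_add_int32(invl, FM_CPU_RETIRE_CORE_ID, 1);
 *	(void) nvlist_add_int32(invl, FM_CPU_RETIRE_STRAND_ID, 0);
 *
 * On success, the output nvlist carries FM_CPU_RETIRE_OLDSTATUS with the
 * strand's previous P_* state.
 */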
/*
 * Return the value of the x86gentopo_legacy variable as an nvpair.
 *
 * The caller is responsible for freeing the nvlist.
 */
/* ARGSUSED */
int
fm_ioctl_gentopo_legacy(int cmd, nvlist_t *invl, nvlist_t **onvlp)
{
	nvlist_t *nvl;

	if (cmd != FM_IOC_GENTOPO_LEGACY) {
		return (ENOTTY);
	}

	/*
	 * Inform the caller of the intentions of the ereport generators to
	 * generate either a "generic" or "legacy" x86 topology.
	 */

	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	(void) nvlist_add_int32(nvl, FM_GENTOPO_LEGACY, x86gentopo_legacy);
	*onvlp = nvl;

	return (0);
}

/*
 * This is an internal bound on the maximum number of caches that we expect to
 * encounter to reduce dynamic allocation.
 */
#define	FM_MAX_CACHES	0x10

static int
fm_cmi_cache_err_to_errno(cmi_errno_t cmi)
{
	switch (cmi) {
	case CMIERR_C_NODATA:
		return (ENOTSUP);
	/*
	 * Right now, CMIERR_C_BADCACHENO is explicitly not mapped to EINVAL
	 * (which is what it maps to in cmi_hw.c).  This discrepancy exists
	 * because nothing in a user request here can end up producing an
	 * invalid value; it can only occur because we asked for a cache
	 * that we were told exists, but doesn't actually.  If we returned
	 * EINVAL, the user would be left wondering what was invalid about
	 * their request.
	 */
	case CMIERR_C_BADCACHENO:
	default:
		return (EIO);
	}
}

static int
fm_populate_cache(cmi_hdl_t hdl, nvlist_t *nvl, uint_t cpuno)
{
	int ret;
	cmi_errno_t err;
	uint32_t ncache;
	nvlist_t *caches[FM_MAX_CACHES];
	char buf[32];

	err = cmi_cache_ncaches(hdl, &ncache);
	if (err != CMI_SUCCESS) {
		return (fm_cmi_cache_err_to_errno(err));
	}

	/*
	 * Our promise to userland is that if we skip a value here then there
	 * are no caches.
	 */
	if (ncache == 0) {
		return (0);
	} else if (ncache > FM_MAX_CACHES) {
		return (EOVERFLOW);
	}

	bzero(caches, sizeof (caches));
	for (uint32_t i = 0; i < ncache; i++) {
		x86_cache_t c;
		fm_cache_info_type_t type = 0;

		(void) nvlist_alloc(&caches[i], NV_UNIQUE_NAME, KM_SLEEP);
		err = cmi_cache_info(hdl, i, &c);
		if (err != CMI_SUCCESS) {
			ret = fm_cmi_cache_err_to_errno(err);
			goto cleanup;
		}

		fnvlist_add_uint32(caches[i], FM_CACHE_INFO_LEVEL, c.xc_level);
		switch (c.xc_type) {
		case X86_CACHE_TYPE_DATA:
			type = FM_CACHE_INFO_T_DATA;
			break;
		case X86_CACHE_TYPE_INST:
			type = FM_CACHE_INFO_T_INSTR;
			break;
		case X86_CACHE_TYPE_UNIFIED:
			type = FM_CACHE_INFO_T_DATA | FM_CACHE_INFO_T_INSTR |
			    FM_CACHE_INFO_T_UNIFIED;
			break;
		default:
			break;
		}
		fnvlist_add_uint32(caches[i], FM_CACHE_INFO_TYPE,
		    (uint32_t)type);
		fnvlist_add_uint64(caches[i], FM_CACHE_INFO_NSETS, c.xc_nsets);
		fnvlist_add_uint32(caches[i], FM_CACHE_INFO_NWAYS, c.xc_nways);
		fnvlist_add_uint32(caches[i], FM_CACHE_INFO_LINE_SIZE,
		    c.xc_line_size);
		fnvlist_add_uint64(caches[i], FM_CACHE_INFO_TOTAL_SIZE,
		    c.xc_size);
		if ((c.xc_flags & X86_CACHE_F_FULL_ASSOC) != 0) {
			fnvlist_add_boolean(caches[i],
			    FM_CACHE_INFO_FULLY_ASSOC);
		}
		fnvlist_add_uint64(caches[i], FM_CACHE_INFO_ID, c.xc_id);
		fnvlist_add_uint32(caches[i], FM_CACHE_INFO_X86_APIC_SHIFT,
		    c.xc_apic_shift);
	}

	(void) snprintf(buf, sizeof (buf), "%u", cpuno);
	fnvlist_add_nvlist_array(nvl, buf, caches, (uint_t)ncache);
	ret = 0;

cleanup:
	for (uint32_t i = 0; i < ncache; i++) {
		nvlist_free(caches[i]);
	}
	return (ret);
}
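
/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * FM_CACHE_INFO_TYPE is a bit-field, so a unified cache answers for both
 * data and instruction queries, e.g.
 *
 *	uint32_t type;
 *
 *	(void) nvlist_lookup_uint32(cache, FM_CACHE_INFO_TYPE, &type);
 *	if ((type & FM_CACHE_INFO_T_DATA) != 0) {
 *		// holds data: true for data and unified caches alike
 *	}
 */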

/*
 * Gather all of the different per-CPU leaves and return them as a series of
 * nvlists.
 */
int
fm_ioctl_cache_info(int cmd, nvlist_t *invl, nvlist_t **onvlp)
{
	int ret = 0;
	fm_cmi_walk_t walk;
	nvlist_t *nvl;

	if (cmd != FM_IOC_CACHE_INFO) {
		return (ENOTTY);
	}

	walk_init(&walk, ANY_ID, ANY_ID, ANY_ID, NULL);
	cmi_hdl_walk(select_cmi_hdl, &walk, NULL, NULL);
	if (walk.nhdl == 0) {
		walk_fini(&walk);
		return (ENOENT);
	}

	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	fnvlist_add_uint32(nvl, FM_CACHE_INFO_NCPUS, walk.nhdl);

	/*
	 * Even if populating a given CPU's cache information fails, we
	 * must still release the hold on every handle that the walk took.
	 */
	for (uint_t i = 0; i < walk.nhdl; i++) {
		if (ret == 0)
			ret = fm_populate_cache(walk.hdls[i], nvl, i);
		cmi_hdl_rele(walk.hdls[i]);
	}
	walk_fini(&walk);

	if (ret == 0) {
		*onvlp = nvl;
	} else {
		nvlist_free(nvl);
	}

	return (ret);
}