/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>
#ifdef	__xpv
#include <sys/hypervisor.h>
#endif

/*
 * AMD-specific equivalence table
 */
static ucode_eqtbl_amd_t *ucode_eqtbl_amd;

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

static ucode_file_t ucodefile;

static void *ucode_zalloc(processorid_t, size_t);
static void ucode_free(processorid_t, void *, size_t);

static int ucode_capable_amd(cpu_t *);
static int ucode_capable_intel(cpu_t *);

static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
    int);

static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
static void ucode_file_reset_intel(ucode_file_t *, processorid_t);

static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);

#ifdef	__xpv
static void ucode_load_xpv(ucode_update_t *);
static void ucode_chipset_amd(uint8_t *, int);
#endif

static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);

static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);

#ifndef __xpv
static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
    ucode_file_amd_t *, int);
#endif
static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
    ucode_header_intel_t *, ucode_ext_table_intel_t *);

static void ucode_read_rev_amd(cpu_ucode_info_t *);
static void ucode_read_rev_intel(cpu_ucode_info_t *);

static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,
	ucode_capable_amd,
	ucode_file_reset_amd,
	ucode_read_rev_amd,
	ucode_load_amd,
	ucode_validate_amd,
	ucode_extract_amd,
	ucode_locate_amd
};

static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,
	ucode_capable_intel,
	ucode_file_reset_intel,
	ucode_read_rev_intel,
	ucode_load_intel,
	ucode_validate_intel,
	ucode_extract_intel,
	ucode_locate_intel
};

const struct ucode_ops *ucode;

static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;
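/*
 * As a debugging aid, it can for instance be set at boot via /etc/system
 * ("set ucode_force_update = 1"); this note is illustrative, not a
 * supported interface.
 */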

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

/*
 * Called when we are done with microcode update on all processors to free up
 * space allocated for the microcode file.
 */
void
ucode_cleanup(void)
{
	if (ucode == NULL)
		return;

	ucode->file_reset(&ucodefile, -1);
}

/*
 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
 * allocated with BOP_ALLOC() and does not require a free.
 */
static void *
ucode_zalloc(processorid_t id, size_t size)
{
	if (id)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

static void
ucode_free(processorid_t id, void *buf, size_t size)
{
	if (id)
		kmem_free(buf, size);
}

/*
 * Check whether or not a processor is capable of microcode operations.
 * Returns 1 if it is capable, 0 if not.
 *
 * At this point we only support microcode update for:
 * - Intel processors family 6 and above, and
 * - AMD processors family 0x10 and above.
 *
 * We also assume that we don't support a mix of Intel and
 * AMD processors in the same box.
 *
 * An i86xpv guest domain or a virtual machine can't update the microcode.
 */

#define	XPVDOMU_OR_HVM	\
	((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)

/*ARGSUSED*/
static int
ucode_capable_amd(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 0x10);
}

static int
ucode_capable_intel(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 6);
}

/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 */
static void
ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_amd_t *ucodefp = ufp->amd;

	if (ucodefp == NULL)
		return;

	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
	ufp->amd = NULL;
}

static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}

/*
 * Find the equivalent CPU id in the equivalence table.
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
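		/*
		 * Read the table one entry at a time, stopping at the
		 * entry that matches this CPU's signature or at the
		 * all-zero entry that terminates the table.
		 */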
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}

/*
 * xVM cannot check for the presence of PCI devices. Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */
#ifdef __xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

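	/*
	 * As this code expects it, the container file starts with a
	 * 32-bit magic number, followed by the equivalence table section
	 * (a 32-bit type word and a 32-bit length, then the table itself)
	 * and one or more patch sections, each again introduced by a
	 * 32-bit type word and a 32-bit length.
	 */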
	/* skip to first microcode patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif

/*
 * Populate the ucode file structure from the microcode file corresponding to
 * this CPU, if one exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef __xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY,
	 * where XXXX is the equivalent CPU id and YY is the running patch
	 * number.  Patches specific to certain chipsets are guaranteed to
	 * have lower numbers than less specific patches, so we can just
	 * load the first patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special. To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file has to be loaded.
	 *
	 * Since this code only runs on the boot CPU, we don't have to worry
	 * about ucode_zalloc() failing or about freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}

static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE_INTEL;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for a microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing;
	 * reset the microcode data structure and read in the new file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t	*uhp = ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

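		/*
		 * As validated here, the 32-bit sum over the header and
		 * body of the update must be zero: the header checksum is
		 * carried into the body checksum below.  The extended
		 * signature table, if present, carries its own checksums.
		 */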
		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is an extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}

#ifndef __xpv
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	/*
	 * Don't even think about loading patches that would require code
	 * execution. Does not apply to patches for family 0x14 and beyond.
	 */
	if (uh->uh_cpu_rev < 0x5000 &&
	    size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev && !ucode_force_update)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif

/*
 * Check whether the given microcode matches this processor.  Returns EM_OK
 * on a match, EM_HIGHERREV if it matches but the running revision is the
 * same or newer (and no forced update was requested), and EM_NOMATCH
 * otherwise.
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}

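/*
 * Invoked on the target CPU via cross-call (see the xc_sync() call in
 * ucode_update() below): write the microcode image to the vendor-specific
 * MSR and read back the resulting revision.
 */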
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
#ifndef __xpv
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

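	/*
	 * The MSR write is done under on_trap() protection so that a fault
	 * taken during the update is caught here rather than bringing down
	 * the system.
	 */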
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		/*
		 * On some platforms the cache must be invalidated for the
		 * microcode update to succeed, depending on which parts of
		 * the processor the microcode is updating.
		 */
		invalidate_cache();
		wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
#endif
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef	__xpv
	ucode_update_t uus;
#else
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef	__xpv
	kpreempt_disable();
	if (on_trap(&otd, OT_DATA_ACCESS)) {
		no_trap();
		kpreempt_enable();
		return (0);
	}
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	no_trap();
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}

/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef __xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef __xpv
	/*
	 * The hypervisor wants the header, data, and extended signature
	 * tables.  Since we can only get here from the boot CPU (cpu #0),
	 * we don't need to free the buffer: ucode_zalloc() will use
	 * BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	ASSERT3U(usize, >=, ext_offset);
	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	kpreempt_disable();
	/*
	 * On some platforms the cache must be invalidated for the
	 * microcode update to succeed, depending on which parts of the
	 * processor the microcode is updating.
	 */
	invalidate_cache();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}

#ifdef	__xpv
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif /* __xpv */

static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}

static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architectures Software Developer's Manual
	 * recommends loading MSR_INTC_UCODE_REV with 0 first, then executing
	 * cpuid, to guarantee a correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}

static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef __xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	count = *ptr++; size -= 4;
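
	/*
	 * Scan the equivalence table for an entry matching the installed
	 * CPU's signature; the table ends with an all-zero entry.
	 */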
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
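	/*
	 * Walk the patch sections: skip the body of the previous section,
	 * step over the two 32-bit words leading each section (its type
	 * and size, as the container layout is assumed here), and match
	 * the patch header against this CPU.
	 */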
	do {
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance. This is especially true on
	 * mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}

static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE_INTEL;
	int		remaining;
	int		found = 0;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int	total_size, body_size, ext_size;
		uint8_t	*curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we search through a combined file containing
		 * microcode for many different processors, hitting
		 * EM_NOMATCH along the way is expected, and returning it
		 * to users would only confuse them.  Therefore, if we ever
		 * match a patch of a lower revision, set the return code
		 * to EM_HIGHERREV instead.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
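			/*
			 * The native MSR write takes a pointer to the
			 * microcode body alone, while the hypervisor is
			 * handed the entire update, header included.
			 */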
#ifndef __xpv
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}

/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int		found = 0;
	processorid_t	id;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * For i86xpv, the hypervisor updates all the CPUs and
		 * wants the header, data, and extended signature tables.
		 * ucode_write() will just read back the updated revision
		 * on each CPU after the update has completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

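		/*
		 * Cross-call the target CPU synchronously so that
		 * ucode_write() executes on the CPU being updated.
		 */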
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found) {
		rc = search_rc;
	} else if (rc == EM_OK) {
		cpuid_post_ucodeadm();
	}

	return (rc);
}

/*
 * Initialize mcpu_ucode_info, and perform a microcode update if necessary.
 * This is the entry point from the boot path, where the pointer to the CPU
 * structure is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/*
	 * Space for the BSP is statically allocated; make sure the pointer
	 * has been set up.
	 */
	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * MSR_INTC_PLATFORM_ID is supported on Celeron and Xeon processors
	 * (family 6, model 5 and above) and on all later processors.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef	__xpv
	/*
	 * For i86xpv, the hypervisor will update all the CPUs.  We only
	 * need to do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need a microcode update.
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}

/*
 * Returns the current microcode revision (from the machcpu structure) for
 * each CPU.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(ucode);
	ASSERT(revp);
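	/* revp must point to an array with room for max_ncpus entries. */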

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}