xref: /illumos-gate/usr/src/uts/intel/os/cpuid.c (revision dd23d762)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25  * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
26  * Copyright 2020 Joyent, Inc.
27  * Copyright 2023 Oxide Computer Company
28  * Copyright 2022 MNX Cloud, Inc.
29  */
30 /*
31  * Copyright (c) 2010, Intel Corporation.
32  * All rights reserved.
33  */
34 /*
35  * Portions Copyright 2009 Advanced Micro Devices, Inc.
36  */
37 
38 /*
39  * CPU Identification logic
40  *
41  * The purpose of this file and its companion, cpuid_subr.c, is to help deal
42  * with the identification of CPUs, their features, and their topologies. More
43  * specifically, this file helps drive the following:
44  *
45  * 1. Enumeration of features of the processor which are used by the kernel to
46  *    determine what features to enable or disable. These may be instruction set
47  *    enhancements or features that we use.
48  *
49  * 2. Enumeration of instruction set architecture (ISA) additions that userland
50  *    will be told about through the auxiliary vector.
51  *
52  * 3. Understanding the physical topology of the CPU such as the number of
53  *    caches, how many cores it has, whether or not it supports symmetric
54  *    multi-processing (SMT), etc.
55  *
56  * ------------------------
57  * CPUID History and Basics
58  * ------------------------
59  *
60  * The cpuid instruction was added by Intel roughly around the time that the
61  * original Pentium was introduced. The purpose of cpuid was to tell in a
62  * programmatic fashion information about the CPU that previously was guessed
63  * at. For example, an important part of cpuid is that we can know what
64  * extensions to the ISA exist. If you use an invalid opcode you would get a
65  * #UD, so this method allows a program (whether a user program or the kernel)
66  * to determine what exists without crashing or getting a SIGILL. Of course,
67  * this was also during the era of the clones and the AMD Am5x86. The vendor
68  * name shows up first in cpuid for a reason.
69  *
70  * cpuid information is broken down into ranges called a 'leaf'. Each leaf puts
71  * unique values into the registers %eax, %ebx, %ecx, and %edx and each leaf has
72  * its own meaning. The different leaves are broken down into different regions:
73  *
74  *	[ 0, 7fffffff ]			This region is called the 'basic'
75  *					region. This region is generally defined
76  *					by Intel, though some of the original
77  *					portions have different meanings based
78  *					on the manufacturer. These days, Intel
79  *					adds most new features to this region.
80  *					AMD adds non-Intel compatible
81  *					information in the third, extended
82  *					region. Intel uses this for everything
83  *					including ISA extensions, CPU
84  *					features, cache information, topology,
85  *					and more.
86  *
87  *					There is a hole carved out of this
88  *					region which is reserved for
89  *					hypervisors.
90  *
91  *	[ 40000000, 4fffffff ]		This region, which is found in the
92  *					middle of the previous region, is
93  *					explicitly promised to never be used by
94  *					CPUs. Instead, it is used by hypervisors
95  *					to communicate information about
96  *					themselves to the operating system. The
97  *					values and details are unique for each
98  *					hypervisor.
99  *
100  *	[ 80000000, ffffffff ]		This region is called the 'extended'
101  *					region. Some of the low leaves mirror
102  *					parts of the basic leaves. This region
103  *					has generally been used by AMD for
104  *					various extensions. For example, AMD-
105  *					specific information about caches,
106  *					features, and topology are found in this
107  *					region.
108  *
109  * To specify a range, you place the desired leaf into %eax, zero %ebx, %ecx,
110  * and %edx, and then issue the cpuid instruction. At the first leaf in each of
111  * the ranges, one of the primary things returned is the maximum valid leaf in
112  * that range. This allows for discovery of what range of CPUID is valid.
113  *
114  * The CPUs have potentially surprising behavior when using an invalid leaf or
115  * unimplemented leaf. If the requested leaf is within the valid basic or
116  * extended range, but is unimplemented, then %eax, %ebx, %ecx, and %edx will be
117  * set to zero. However, if you specify a leaf that is outside of a valid range,
118  * then instead it will be filled with the last valid _basic_ leaf. For example,
119  * if the maximum basic value is on leaf 0x3, then issuing a cpuid for leaf 4 or
120  * an invalid extended leaf will return the information for leaf 3.
121  *
122  * Some leaves are broken down into sub-leaves. This means that the value
123  * depends on both the leaf asked for in %eax and a secondary register. For
124  * example, Intel uses the value in %ecx on leaf 7 to indicate a sub-leaf to get
125  * additional information. Or when getting topology information in leaf 0xb, the
126  * initial value in %ecx changes which level of the topology that you are
127  * getting information about.
128  *
129  * cpuid values are always kept to 32 bits regardless of whether or not the
130  * program is in 64-bit mode. When executing in 64-bit mode, the upper
131  * 32 bits of the register are always set to zero so that way the values are the
132  * same regardless of execution mode.
133  *
134  * ----------------------
135  * Identifying Processors
136  * ----------------------
137  *
138  * We can identify a processor in two steps. The first step looks at cpuid leaf
139  * 0. Leaf 0 contains the processor's vendor information. This is done by
140  * putting a 12 character string in %ebx, %ecx, and %edx. On AMD, it is
141  * 'AuthenticAMD' and on Intel it is 'GenuineIntel'.
142  *
143  * From there, a processor is identified by a combination of three different
144  * values:
145  *
146  *  1. Family
147  *  2. Model
148  *  3. Stepping
149  *
150  * Each vendor uses the family and model to uniquely identify a processor. The
151  * way that family and model are changed depends on the vendor. For example,
152  * Intel has been using family 0x6 for almost all of their processor since the
153  * Pentium Pro/Pentium II era, often called the P6. The model is used to
154  * identify the exact processor. Different models are often used for the client
155  * (consumer) and server parts. Even though each processor often has major
156  * architectural differences, they still are considered the same family by
157  * Intel.
158  *
159  * On the other hand, each major AMD architecture generally has its own family.
160  * For example, the K8 is family 0x10, Bulldozer 0x15, and Zen 0x17. Within it
161  * the model number is used to help identify specific processors.  As AMD's
162  * product lines have expanded, they have started putting a mixed bag of
163  * processors into the same family, with each processor under a single
164  * identifying banner (e.g., Milan, Cezanne) using a range of model numbers.  We
165  * refer to each such collection as a processor family, distinct from cpuid
166  * family.  Importantly, each processor family has a BIOS and Kernel Developer's
167  * Guide (BKDG, older parts) or Processor Programming Reference (PPR) that
168  * defines the processor family's non-architectural features.  In general, we'll
169  * use "family" here to mean the family number reported by the cpuid instruction
170  * and distinguish the processor family from it where appropriate.
171  *
172  * The stepping is used to refer to a revision of a specific microprocessor. The
173  * term comes from equipment used to produce masks that are used to create
174  * integrated circuits.
175  *
176  * The information is present in leaf 1, %eax. In technical documentation you
177  * will see the terms extended model and extended family. The original family,
178  * model, and stepping fields were each 4 bits wide. If the values in either
179  * are 0xf, then one is to consult the extended model and extended family, which
180  * take previously reserved bits and allow for a larger number of models and add
181  * 0xf to them.
182  *
183  * When we process this information, we store the full family, model, and
184  * stepping in the struct cpuid_info members cpi_family, cpi_model, and
185  * cpi_step, respectively. Whenever you are performing comparisons with the
186  * family, model, and stepping, you should use these members and not the raw
187  * values from cpuid. If you must use the raw values from cpuid directly, you
188  * must make sure that you add the extended model and family to the base model
189  * and family.
190  *
191  * In general, we do not use information about the family, model, and stepping
192  * to determine whether or not a feature is present; that is generally driven by
193  * specific leaves. However, when something we care about on the processor is
194  * not considered 'architectural' meaning that it is specific to a set of
195  * processors and not promised in the architecture model to be consistent from
196  * generation to generation, then we will fall back on this information. The
197  * most common cases where this comes up is when we have to workaround errata in
198  * the processor, are dealing with processor-specific features such as CPU
199  * performance counters, or we want to provide additional information for things
200  * such as fault management.
201  *
202  * While processors also do have a brand string, which is the name that people
203  * are familiar with when buying the processor, they are not meant for
204  * programmatic consumption. That is what the family, model, and stepping are
205  * for.
206  *
207  * We use the x86_chiprev_t to encode a combination of vendor, processor family,
208  * and stepping(s) that refer to a single or very closely related set of silicon
209  * implementations; while there are sometimes more specific ways to learn of the
210  * presence or absence of a particular erratum or workaround, one may generally
211  * assume that all processors of the same chiprev have the same errata and we
212  * have chosen to represent them this way precisely because that is how AMD
213  * groups them in their revision guides (errata documentation).  The processor
214  * family (x86_processor_family_t) may be extracted from the chiprev if that
215  * level of detail is not needed.  Processor families are considered unordered
216  * but revisions within a family may be compared for either an exact match or at
217  * least as recent as a reference revision.  See the chiprev_xxx() functions
218  * below.
219  *
220  * Similarly, each processor family implements a particular microarchitecture,
221  * which itself may have multiple revisions.  In general, non-architectural
222  * features are specific to a processor family, but some may exist across
223  * families containing cores that implement the same microarchitectural revision
224  * (and, such cores share common bugs, too).  We provide utility routines
225  * analogous to those for extracting and comparing chiprevs for
226  * microarchitectures as well; see the uarch_xxx() functions.
227  *
228  * Both chiprevs and uarchrevs are defined in x86_archext.h and both are at
229  * present used and available only for AMD and AMD-like processors.
230  *
231  * ------------
232  * CPUID Passes
233  * ------------
234  *
235  * As part of performing feature detection, we break this into several different
236  * passes. There used to be a pass 0 that was done from assembly in locore.s to
237  * support processors that have a missing or broken cpuid instruction (notably
238  * certain Cyrix processors) but those were all 32-bit processors which are no
239  * longer supported. Passes are no longer numbered explicitly to make it easier
240  * to break them up or move them around as needed; however, they still have a
241  * well-defined execution ordering enforced by the definition of cpuid_pass_t in
242  * x86_archext.h. The external interface to execute a cpuid pass or determine
243  * whether a pass has been completed consists of cpuid_execpass() and
244  * cpuid_checkpass() respectively.  The passes now, in that execution order,
245  * are as follows:
246  *
247  *	PRELUDE		This pass does not have any dependencies on system
248  *			setup; in particular, unlike all subsequent passes it is
249  *			guaranteed not to require PCI config space access.  It
250  *			sets the flag indicating that the processor we are
251  *			running on supports the cpuid instruction, which all
252  *			64-bit processors do.  This would also be the place to
253  *			add any other basic state that is required later on and
254  *			can be learned without dependencies.
255  *
256  *	IDENT		Determine which vendor manufactured the CPU, the family,
257  *			model, and stepping information, and compute basic
258  *			identifying tags from those values.  This is done first
259  *			so that machine-dependent code can control the features
260  *			the cpuid instruction will report during subsequent
261  *			passes if needed, and so that any intervening
262  *			machine-dependent code that needs basic identity will
263  *			have it available.  This includes synthesised
264  *			identifiers such as chiprev and uarchrev as well as the
265  *			values obtained directly from cpuid.  Prior to executing
266  *			this pass, machine-depedent boot code is responsible for
267  *			ensuring that the PCI configuration space access
268  *			functions have been set up and, if necessary, that
269  *			determine_platform() has been called.
270  *
271  *	BASIC		This is the primary pass and is responsible for doing a
272  *			large number of different things:
273  *
274  *			1. Gathering a large number of feature flags to
275  *			determine which features the CPU support and which
276  *			indicate things that we need to do other work in the OS
277  *			to enable. Features detected this way are added to the
278  *			x86_featureset which can be queried to
279  *			determine what we should do. This includes processing
280  *			all of the basic and extended CPU features that we care
281  *			about.
282  *
283  *			2. Determining the CPU's topology. This includes
284  *			information about how many cores and threads are present
285  *			in the package. It also is responsible for figuring out
286  *			which logical CPUs are potentially part of the same core
287  *			and what other resources they might share. For more
288  *			information see the 'Topology' section.
289  *
290  *			3. Determining the set of CPU security-specific features
291  *			that we need to worry about and determine the
292  *			appropriate set of workarounds.
293  *
294  *			Pass 1 on the boot CPU occurs before KMDB is started.
295  *
296  *	EXTENDED	The second pass is done after startup(). Here, we check
297  *			other miscellaneous features. Most of this is gathering
298  *			additional basic and extended features that we'll use in
299  *			later passes or for debugging support.
300  *
301  *	DYNAMIC		The third pass occurs after the kernel memory allocator
302  *			has been fully initialized. This gathers information
303  *			where we might need dynamic memory available for our
304  *			uses. This includes several varying width leaves that
305  *			have cache information and the processor's brand string.
306  *
307  *	RESOLVE		The fourth and final normal pass is performed after the
308  *			kernel has brought most everything online. This is
309  *			invoked from post_startup(). In this pass, we go through
310  *			the set of features that we have enabled and turn that
311  *			into the hardware auxiliary vector features that
312  *			userland receives. This is used by userland, primarily
313  *			by the run-time link-editor (RTLD), though userland
314  *			software could also refer to it directly.
315  *
316  * The function that performs a pass is currently assumed to be infallible, and
317  * all existing implementation are.  This simplifies callers by allowing
318  * cpuid_execpass() to return void. Similarly, implementers do not need to check
319  * for a NULL CPU argument; the current CPU's cpu_t is substituted if necessary.
320  * Both of these assumptions can be relaxed if needed by future developments.
321  * Tracking of completed states is handled by cpuid_execpass(). It is programmer
322  * error to attempt to execute a pass before all previous passes have been
323  * completed on the specified CPU, or to request cpuid information before the
324  * pass that captures it has been executed.  These conditions can be tested
325  * using cpuid_checkpass().
326  *
327  * The Microcode Pass
328  *
329  * After a microcode update, we do a selective rescan of the cpuid leaves to
330  * determine what features have changed. Microcode updates can provide more
331  * details about security related features to deal with issues like Spectre and
332  * L1TF. On occasion, vendors have violated their contract and removed bits.
333  * However, we don't try to detect that because that puts us in a situation that
334  * we really can't deal with. As such, the only thing we rescan are security
335  * related features today. See cpuid_pass_ucode().  This pass may be run in a
336  * different sequence on APs and therefore is not part of the sequential order;
337  * It is invoked directly instead of by cpuid_execpass() and its completion
338  * status cannot be checked by cpuid_checkpass().  This could be integrated with
339  * a more complex dependency mechanism if warranted by future developments.
340  *
341  * All of the passes are run on all CPUs. However, for the most part we only
342  * care about what the boot CPU says about this information and use the other
343  * CPUs as a rough guide to sanity check that we have the same feature set.
344  *
345  * We do not support running multiple logical CPUs with disjoint, let alone
346  * different, feature sets.
347  *
348  * ------------------
349  * Processor Topology
350  * ------------------
351  *
352  * One of the important things that we need to do is to understand the topology
353  * of the underlying processor. When we say topology in this case, we're trying
354  * to understand the relationship between the logical CPUs that the operating
355  * system sees and the underlying physical layout. Different logical CPUs may
356  * share different resources which can have important consequences for the
357  * performance of the system. For example, they may share caches, execution
358  * units, and more.
359  *
360  * The topology of the processor changes from generation to generation and
361  * vendor to vendor.  Along with that, different vendors use different
362  * terminology, and the operating system itself uses occasionally overlapping
363  * terminology. It's important to understand what this topology looks like so
364  * one can understand the different things that we try to calculate and
365  * determine.
366  *
367  * To get started, let's talk about a little bit of terminology that we've used
368  * so far, is used throughout this file, and is fairly generic across multiple
369  * vendors:
370  *
371  * CPU
372  *	A central processing unit (CPU) refers to a logical and/or virtual
373  *	entity that the operating system can execute instructions on. The
374  *	underlying resources for this CPU may be shared between multiple
375  *	entities; however, to the operating system it is a discrete unit.
376  *
377  * PROCESSOR and PACKAGE
378  *
379  *	Generally, when we use the term 'processor' on its own, we are referring
380  *	to the physical entity that one buys and plugs into a board. However,
381  *	because processor has been overloaded and one might see it used to mean
382  *	multiple different levels, we will instead use the term 'package' for
383  *	the rest of this file. The term package comes from the electrical
384  *	engineering side and refers to the physical entity that encloses the
385  *	electronics inside. Strictly speaking the package can contain more than
386  *	just the CPU, for example, on many processors it may also have what's
387  *	called an 'integrated graphical processing unit (GPU)'. Because the
388  *	package can encapsulate multiple units, it is the largest physical unit
389  *	that we refer to.
390  *
391  * SOCKET
392  *
393  *	A socket refers to unit on a system board (generally the motherboard)
394  *	that can receive a package. A single package, or processor, is plugged
395  *	into a single socket. A system may have multiple sockets. Often times,
396  *	the term socket is used interchangeably with package and refers to the
397  *	electrical component that has plugged in, and not the receptacle itself.
398  *
399  * CORE
400  *
401  *	A core refers to the physical instantiation of a CPU, generally, with a
402  *	full set of hardware resources available to it. A package may contain
403  *	multiple cores inside of it or it may just have a single one. A
404  *	processor with more than one core is often referred to as 'multi-core'.
405  *	In illumos, we will use the feature X86FSET_CMP to refer to a system
406  *	that has 'multi-core' processors.
407  *
408  *	A core may expose a single logical CPU to the operating system, or it
409  *	may expose multiple CPUs, which we call threads, defined below.
410  *
411  *	Some resources may still be shared by cores in the same package. For
412  *	example, many processors will share the level 3 cache between cores.
413  *	Some AMD generations share hardware resources between cores. For more
414  *	information on that see the section 'AMD Topology'.
415  *
416  * THREAD and STRAND
417  *
418  *	In this file, generally a thread refers to a hardware resources and not
419  *	the operating system's logical abstraction. A thread is always exposed
420  *	as an independent logical CPU to the operating system. A thread belongs
421  *	to a specific core. A core may have more than one thread. When that is
422  *	the case, the threads that are part of the same core are often referred
423  *	to as 'siblings'.
424  *
425  *	When multiple threads exist, this is generally referred to as
426  *	simultaneous multi-threading (SMT). When Intel introduced this in their
427  *	processors they called it hyper-threading (HT). When multiple threads
428  *	are active in a core, they split the resources of the core. For example,
429  *	two threads may share the same set of hardware execution units.
430  *
431  *	The operating system often uses the term 'strand' to refer to a thread.
432  *	This helps disambiguate it from the software concept.
433  *
434  * CHIP
435  *
436  *	Unfortunately, the term 'chip' is dramatically overloaded. At its most
437  *	base meaning, it is used to refer to a single integrated circuit, which
438  *	may or may not be the only thing in the package. In illumos, when you
439  *	see the term 'chip' it is almost always referring to the same thing as
440  *	the 'package'. However, many vendors may use chip to refer to one of
441  *	many integrated circuits that have been placed in the package. As an
442  *	example, see the subsequent definition.
443  *
444  *	To try and keep things consistent, we will only use chip when referring
445  *	to the entire integrated circuit package, with the exception of the
446  *	definition of multi-chip module (because it is in the name) and use the
447  *	term 'die' when we want the more general, potential sub-component
448  *	definition.
449  *
450  * DIE
451  *
452  *	A die refers to an integrated circuit. Inside of the package there may
453  *	be a single die or multiple dies. This is sometimes called a 'chip' in
454  *	vendor's parlance, but in this file, we use the term die to refer to a
455  *	subcomponent.
456  *
457  * MULTI-CHIP MODULE
458  *
459  *	A multi-chip module (MCM) refers to putting multiple distinct chips that
460  *	are connected together in the same package. When a multi-chip design is
461  *	used, generally each chip is manufactured independently and then joined
462  *	together in the package. For example, on AMD's Zen microarchitecture
463  *	(family 0x17), the package contains several dies (the second meaning of
464  *	chip from above) that are connected together.
465  *
466  * CACHE
467  *
468  *	A cache is a part of the processor that maintains copies of recently
469  *	accessed memory. Caches are split into levels and then into types.
470  *	Commonly there are one to three levels, called level one, two, and
471  *	three. The lower the level, the smaller it is, the closer it is to the
472  *	execution units of the CPU, and the faster it is to access. The layout
473  *	and design of the cache come in many different flavors, consult other
474  *	resources for a discussion of those.
475  *
476  *	Caches are generally split into two types, the instruction and data
477  *	cache. The caches contain what their names suggest, the instruction
478  *	cache has executable program text, while the data cache has all other
479  *	memory that the processor accesses. As of this writing, data is kept
480  *	coherent between all of the caches on x86, so if one modifies program
481  *	text before it is executed, that will be in the data cache, and the
482  *	instruction cache will be synchronized with that change when the
483  *	processor actually executes those instructions. This coherency also
484  *	covers the fact that data could show up in multiple caches.
485  *
486  *	Generally, the lowest level caches are specific to a core. However, the
487  *	last layer cache is shared between some number of cores. The number of
488  *	CPUs sharing this last level cache is important. This has implications
489  *	for the choices that the scheduler makes, as accessing memory that might
490  *	be in a remote cache after thread migration can be quite expensive.
491  *
492  *	Sometimes, the word cache is abbreviated with a '$', because in US
493  *	English the word cache is pronounced the same as cash. So L1D$ refers to
494  *	the L1 data cache, and L2$ would be the L2 cache. This will not be used
495  *	in the rest of this theory statement for clarity.
496  *
497  * MEMORY CONTROLLER
498  *
499  *	The memory controller is a component that provides access to DRAM. Each
500  *	memory controller can access a set number of DRAM channels. Each channel
501  *	can have a number of DIMMs (sticks of memory) associated with it. A
502  *	given package may have more than one memory controller. The association
503  *	of the memory controller to a group of cores is important as it is
504  *	cheaper to access memory on the controller that you are associated with.
505  *
506  * NUMA
507  *
508  *	NUMA or non-uniform memory access, describes a way that systems are
509  *	built. On x86, any processor core can address all of the memory in the
510  *	system. However, When using multiple sockets or possibly within a
511  *	multi-chip module, some of that memory is physically closer and some of
512  *	it is further. Memory that is further away is more expensive to access.
513  *	Consider the following image of multiple sockets with memory:
514  *
515  *	+--------+                                                +--------+
516  *	| DIMM A |         +----------+      +----------+         | DIMM D |
517  *	+--------+-+       |          |      |          |       +-+------+-+
518  *	  | DIMM B |=======| Socket 0 |======| Socket 1 |=======| DIMM E |
519  *	  +--------+-+     |          |      |          |     +-+------+-+
520  *	    | DIMM C |     +----------+      +----------+     | DIMM F |
521  *	    +--------+                                        +--------+
522  *
523  *	In this example, Socket 0 is closer to DIMMs A-C while Socket 1 is
524  *	closer to DIMMs D-F. This means that it is cheaper for socket 0 to
525  *	access DIMMs A-C and more expensive to access D-F as it has to go
526  *	through Socket 1 to get there. The inverse is true for Socket 1. DIMMs
527  *	D-F are cheaper than A-C. While the socket form is the most common, when
528  *	using multi-chip modules, this can also sometimes occur. For another
529  *	example of this that's more involved, see the AMD topology section.
530  *
531  *
532  * Intel Topology
533  * --------------
534  *
535  * Most Intel processors since Nehalem, (as of this writing the current gen
536  * is Skylake / Cannon Lake) follow a fairly similar pattern. The CPU portion of
537  * the package is a single monolithic die. MCMs currently aren't used. Most
538  * parts have three levels of caches, with the L3 cache being shared between
539  * all of the cores on the package. The L1/L2 cache is generally specific to
540  * an individual core. The following image shows at a simplified level what
541  * this looks like. The memory controller is commonly part of something called
542  * the 'Uncore', that used to be separate physical chips that were not a part of
543  * the package, but are now part of the same chip.
544  *
545  *  +-----------------------------------------------------------------------+
546  *  | Package                                                               |
547  *  |  +-------------------+  +-------------------+  +-------------------+  |
548  *  |  | Core              |  | Core              |  | Core              |  |
549  *  |  |  +--------+ +---+ |  |  +--------+ +---+ |  |  +--------+ +---+ |  |
550  *  |  |  | Thread | | L | |  |  | Thread | | L | |  |  | Thread | | L | |  |
551  *  |  |  +--------+ | 1 | |  |  +--------+ | 1 | |  |  +--------+ | 1 | |  |
552  *  |  |  +--------+ |   | |  |  +--------+ |   | |  |  +--------+ |   | |  |
553  *  |  |  | Thread | |   | |  |  | Thread | |   | |  |  | Thread | |   | |  |
554  *  |  |  +--------+ +---+ |  |  +--------+ +---+ |  |  +--------+ +---+ |  |
555  *  |  |  +--------------+ |  |  +--------------+ |  |  +--------------+ |  |
556  *  |  |  | L2 Cache     | |  |  | L2 Cache     | |  |  | L2 Cache     | |  |
557  *  |  |  +--------------+ |  |  +--------------+ |  |  +--------------+ |  |
558  *  |  +-------------------+  +-------------------+  +-------------------+  |
559  *  | +-------------------------------------------------------------------+ |
560  *  | |                         Shared L3 Cache                           | |
561  *  | +-------------------------------------------------------------------+ |
562  *  | +-------------------------------------------------------------------+ |
563  *  | |                        Memory Controller                          | |
564  *  | +-------------------------------------------------------------------+ |
565  *  +-----------------------------------------------------------------------+
566  *
567  * A side effect of this current architecture is that what we care about from a
568  * scheduling and topology perspective, is simplified. In general we care about
569  * understanding which logical CPUs are part of the same core and socket.
570  *
571  * To determine the relationship between threads and cores, Intel initially used
572  * the identifier in the advanced programmable interrupt controller (APIC). They
573  * also added cpuid leaf 4 to give additional information about the number of
574  * threads and CPUs in the processor. With the addition of x2apic (which
575  * increased the number of addressable logical CPUs from 8-bits to 32-bits), an
576  * additional cpuid topology leaf 0xB was added.
577  *
578  * AMD Topology
579  * ------------
580  *
581  * When discussing AMD topology, we want to break this into three distinct
582  * generations of topology. There's the basic topology that has been used in
583  * family 0xf+ (Opteron, Athlon64), there's the topology that was introduced
584  * with family 0x15 (Bulldozer), and there's the topology that was introduced
585  * with family 0x17 (Zen), evolved more dramatically in Zen 2 (still family
586  * 0x17), and tweaked slightly in Zen 3 (family 19h). AMD also has some
587  * additional terminology that's worth talking about.
588  *
589  * Until the introduction of family 0x17 (Zen), AMD did not implement something
590  * that they considered SMT. Whether or not the AMD processors have SMT
591  * influences many things including scheduling and reliability, availability,
592  * and serviceability (RAS) features.
593  *
594  * NODE
595  *
596  *	AMD uses the term node to refer to a die that contains a number of cores
597  *	and I/O resources. Depending on the processor family and model, more
598  *	than one node can be present in the package. When there is more than one
599  *	node this indicates a multi-chip module. Usually each node has its own
600  *	access to memory and I/O devices. This is important and generally
601  *	different from the corresponding Intel Nehalem-Skylake+ processors. As a
602  *	result, we track this relationship in the operating system.
603  *
604  *	In processors with an L3 cache, the L3 cache is generally shared across
605  *	the entire node, though the way this is carved up varies from generation
606  *	to generation.
607  *
608  * BULLDOZER
609  *
610  *	Starting with the Bulldozer family (0x15) and continuing until the
611  *	introduction of the Zen microarchitecture, AMD introduced the idea of a
612  *	compute unit. In a compute unit, two traditional cores share a number of
613  *	hardware resources. Critically, they share the FPU, L1 instruction
614  *	cache, and the L2 cache. Several compute units were then combined inside
615  *	of a single node.  Because the integer execution units, L1 data cache,
616  *	and some other resources were not shared between the cores, AMD never
617  *	considered this to be SMT.
618  *
619  * ZEN
620  *
621  *	The Zen family (0x17) uses a multi-chip module (MCM) design, the module
622  *	is called Zeppelin. These modules are similar to the idea of nodes used
623  *	previously. Each of these nodes has two DRAM channels which all of the
624  *	cores in the node can access uniformly. These nodes are linked together
625  *	in the package, creating a NUMA environment.
626  *
627  *	The Zeppelin die itself contains two different 'core complexes'. Each
628  *	core complex consists of four cores which each have two threads, for a
629  *	total of 8 logical CPUs per complex. Unlike other generations,
630  *	where all the logical CPUs in a given node share the L3 cache, here each
631  *	core complex has its own shared L3 cache.
632  *
633  *	A further thing that we need to consider is that in some configurations,
634  *	particularly with the Threadripper line of processors, not every die
635  *	actually has its memory controllers wired up to actual memory channels.
636  *	This means that some cores have memory attached to them and others
637  *	don't.
638  *
639  *	To put Zen in perspective, consider the following images:
640  *
641  *      +--------------------------------------------------------+
642  *      | Core Complex                                           |
643  *      | +-------------------+    +-------------------+  +---+  |
644  *      | | Core       +----+ |    | Core       +----+ |  |   |  |
645  *      | | +--------+ | L2 | |    | +--------+ | L2 | |  |   |  |
646  *      | | | Thread | +----+ |    | | Thread | +----+ |  |   |  |
647  *      | | +--------+-+ +--+ |    | +--------+-+ +--+ |  | L |  |
648  *      | |   | Thread | |L1| |    |   | Thread | |L1| |  | 3 |  |
649  *      | |   +--------+ +--+ |    |   +--------+ +--+ |  |   |  |
650  *      | +-------------------+    +-------------------+  | C |  |
651  *      | +-------------------+    +-------------------+  | a |  |
652  *      | | Core       +----+ |    | Core       +----+ |  | c |  |
653  *      | | +--------+ | L2 | |    | +--------+ | L2 | |  | h |  |
654  *      | | | Thread | +----+ |    | | Thread | +----+ |  | e |  |
655  *      | | +--------+-+ +--+ |    | +--------+-+ +--+ |  |   |  |
656  *      | |   | Thread | |L1| |    |   | Thread | |L1| |  |   |  |
657  *      | |   +--------+ +--+ |    |   +--------+ +--+ |  |   |  |
658  *      | +-------------------+    +-------------------+  +---+  |
659  *      |                                                        |
660  *	+--------------------------------------------------------+
661  *
662  *  This first image represents a single Zen core complex that consists of four
663  *  cores.
664  *
665  *
666  *	+--------------------------------------------------------+
667  *	| Zeppelin Die                                           |
668  *	|  +--------------------------------------------------+  |
669  *	|  |         I/O Units (PCIe, SATA, USB, etc.)        |  |
670  *	|  +--------------------------------------------------+  |
671  *      |                           HH                           |
672  *	|          +-----------+    HH    +-----------+          |
673  *	|          |           |    HH    |           |          |
674  *	|          |    Core   |==========|    Core   |          |
675  *	|          |  Complex  |==========|  Complex  |          |
676  *	|          |           |    HH    |           |          |
677  *	|          +-----------+    HH    +-----------+          |
678  *      |                           HH                           |
679  *	|  +--------------------------------------------------+  |
680  *	|  |                Memory Controller                 |  |
681  *	|  +--------------------------------------------------+  |
682  *      |                                                        |
683  *	+--------------------------------------------------------+
684  *
685  *  This image represents a single Zeppelin Die. Note how both cores are
686  *  connected to the same memory controller and I/O units. While each core
687  *  complex has its own L3 cache as seen in the first image, they both have
688  *  uniform access to memory.
689  *
690  *
691  *                      PP                     PP
692  *                      PP                     PP
693  *           +----------PP---------------------PP---------+
694  *           |          PP                     PP         |
695  *           |    +-----------+          +-----------+    |
696  *           |    |           |          |           |    |
697  *       MMMMMMMMM|  Zeppelin |==========|  Zeppelin |MMMMMMMMM
698  *       MMMMMMMMM|    Die    |==========|    Die    |MMMMMMMMM
699  *           |    |           |          |           |    |
700  *           |    +-----------+ooo    ...+-----------+    |
701  *           |          HH      ooo  ...       HH         |
702  *           |          HH        oo..         HH         |
703  *           |          HH        ..oo         HH         |
704  *           |          HH      ...  ooo       HH         |
705  *           |    +-----------+...    ooo+-----------+    |
706  *           |    |           |          |           |    |
707  *       MMMMMMMMM|  Zeppelin |==========|  Zeppelin |MMMMMMMMM
708  *       MMMMMMMMM|    Die    |==========|    Die    |MMMMMMMMM
709  *           |    |           |          |           |    |
710  *           |    +-----------+          +-----------+    |
711  *           |          PP                     PP         |
712  *           +----------PP---------------------PP---------+
713  *                      PP                     PP
714  *                      PP                     PP
715  *
716  *  This image represents a single Zen package. In this example, it has four
717  *  Zeppelin dies, though some configurations only have a single one. In this
718  *  example, each die is directly connected to the next. Also, each die is
719  *  represented as being connected to memory by the 'M' character and connected
720  *  to PCIe devices and other I/O, by the 'P' character. Because each Zeppelin
721  *  die is made up of two core complexes, we have multiple different NUMA
722  *  domains that we care about for these systems.
723  *
724  * ZEN 2
725  *
726  *	Zen 2 changes things in a dramatic way from Zen 1. Whereas in Zen 1
727  *	each Zeppelin Die had its own I/O die, that has been moved out of the
728  *	core complex in Zen 2. The actual core complex looks pretty similar, but
729  *	now the die actually looks much simpler:
730  *
731  *      +--------------------------------------------------------+
732  *      | Zen 2 Core Complex Die    HH                           |
733  *      |                           HH                           |
734  *      |          +-----------+    HH    +-----------+          |
735  *      |          |           |    HH    |           |          |
736  *      |          |    Core   |==========|    Core   |          |
737  *      |          |  Complex  |==========|  Complex  |          |
738  *      |          |           |    HH    |           |          |
739  *      |          +-----------+    HH    +-----------+          |
740  *      |                           HH                           |
741  *      |                           HH                           |
742  *      +--------------------------------------------------------+
743  *
744  *	From here, when we add the central I/O die, this changes things a bit.
745  *	Each die is connected to the I/O die, rather than trying to interconnect
746  *	them directly. The following image takes the same Zen 1 image that we
747  *	had earlier and shows what it looks like with the I/O die instead:
748  *
749  *                                 PP    PP
750  *                                 PP    PP
751  *           +---------------------PP----PP---------------------+
752  *           |                     PP    PP                     |
753  *           |  +-----------+      PP    PP      +-----------+  |
754  *           |  |           |      PP    PP      |           |  |
755  *           |  |   Zen 2   |    +-PP----PP-+    |   Zen 2   |  |
756  *           |  |    Die   _|    | PP    PP |    |_   Die    |  |
757  *           |  |         |o|oooo|          |oooo|o|         |  |
758  *           |  +-----------+    |          |    +-----------+  |
759  *           |                   |   I/O    |                   |
760  *       MMMMMMMMMMMMMMMMMMMMMMMMMM  Die   MMMMMMMMMMMMMMMMMMMMMMMMMM
761  *       MMMMMMMMMMMMMMMMMMMMMMMMMM        MMMMMMMMMMMMMMMMMMMMMMMMMM
762  *           |                   |          |                   |
763  *       MMMMMMMMMMMMMMMMMMMMMMMMMM        MMMMMMMMMMMMMMMMMMMMMMMMMM
764  *       MMMMMMMMMMMMMMMMMMMMMMMMMM        MMMMMMMMMMMMMMMMMMMMMMMMMM
765  *           |                   |          |                   |
766  *           |  +-----------+    |          |    +-----------+  |
767  *           |  |         |o|oooo| PP    PP |oooo|o|         |  |
768  *           |  |   Zen 2  -|    +-PP----PP-+    |-  Zen 2   |  |
769  *           |  |    Die    |      PP    PP      |    Die    |  |
770  *           |  |           |      PP    PP      |           |  |
771  *           |  +-----------+      PP    PP      +-----------+  |
772  *           |                     PP    PP                     |
773  *           +---------------------PP----PP---------------------+
774  *                                 PP    PP
775  *                                 PP    PP
776  *
777  *	The above has four core complex dies installed, though the Zen 2 EPYC
778  *	and ThreadRipper parts allow for up to eight, while the Ryzen parts
779  *	generally only have one to two. The more notable difference here is how
780  *	everything communicates. Note that memory and PCIe come out of the
781  *	central die. This changes the way that one die accesses a resource. It
782  *	basically always has to go to the I/O die, where as in Zen 1 it may have
783  *	satisfied it locally. In general, this ends up being a better strategy
784  *	for most things, though it is possible to still treat everything in four
785  *	distinct NUMA domains with each Zen 2 die slightly closer to some memory
786  *	and PCIe than otherwise. This also impacts the 'amdzen' nexus driver as
787  *	now there is only one 'node' present.
788  *
789  * ZEN 3
790  *
791  *	From an architectural perspective, Zen 3 is a much smaller change from
792  *	Zen 2 than Zen 2 was from Zen 1, though it makes up for most of that in
793  *	its microarchitectural changes. The biggest thing for us is how the die
794  *	changes. In Zen 1 and Zen 2, each core complex still had its own L3
795  *	cache. However, in Zen 3, the L3 is now shared between the entire core
796  *	complex die and is no longer partitioned between each core complex. This
797  *	means that all cores on the die can share the same L3 cache. Otherwise,
798  *	the general layout of the overall package with various core complexes
799  *	and an I/O die stays the same. Here's what the Core Complex Die looks
800  *	like in a bit more detail:
801  *
802  *               +-------------------------------------------------+
803  *               | Zen 3 Core Complex Die                          |
804  *               | +-------------------+    +-------------------+  |
805  *               | | Core       +----+ |    | Core       +----+ |  |
806  *               | | +--------+ | L2 | |    | +--------+ | L2 | |  |
807  *               | | | Thread | +----+ |    | | Thread | +----+ |  |
808  *               | | +--------+-+ +--+ |    | +--------+-+ +--+ |  |
809  *               | |   | Thread | |L1| |    |   | Thread | |L1| |  |
810  *               | |   +--------+ +--+ |    |   +--------+ +--+ |  |
811  *               | +-------------------+    +-------------------+  |
812  *               | +-------------------+    +-------------------+  |
813  *               | | Core       +----+ |    | Core       +----+ |  |
814  *               | | +--------+ | L2 | |    | +--------+ | L2 | |  |
815  *               | | | Thread | +----+ |    | | Thread | +----+ |  |
816  *               | | +--------+-+ +--+ |    | +--------+-+ +--+ |  |
817  *               | |   | Thread | |L1| |    |   | Thread | |L1| |  |
818  *               | |   +--------+ +--+ |    |   +--------+ +--+ |  |
819  *               | +-------------------+    +-------------------+  |
820  *               |                                                 |
821  *               | +--------------------------------------------+  |
822  *               | |                 L3 Cache                   |  |
823  *               | +--------------------------------------------+  |
824  *               |                                                 |
825  *               | +-------------------+    +-------------------+  |
826  *               | | Core       +----+ |    | Core       +----+ |  |
827  *               | | +--------+ | L2 | |    | +--------+ | L2 | |  |
828  *               | | | Thread | +----+ |    | | Thread | +----+ |  |
829  *               | | +--------+-+ +--+ |    | +--------+-+ +--+ |  |
830  *               | |   | Thread | |L1| |    |   | Thread | |L1| |  |
831  *               | |   +--------+ +--+ |    |   +--------+ +--+ |  |
832  *               | +-------------------+    +-------------------+  |
833  *               | +-------------------+    +-------------------+  |
834  *               | | Core       +----+ |    | Core       +----+ |  |
835  *               | | +--------+ | L2 | |    | +--------+ | L2 | |  |
836  *               | | | Thread | +----+ |    | | Thread | +----+ |  |
837  *               | | +--------+-+ +--+ |    | +--------+-+ +--+ |  |
838  *               | |   | Thread | |L1| |    |   | Thread | |L1| |  |
839  *               | |   +--------+ +--+ |    |   +--------+ +--+ |  |
840  *               | +-------------------+    +-------------------+  |
841  *               +-------------------------------------------------+
842  *
843  *	While it is not pictured, there are connections from the die to the
844  *	broader data fabric and additional functional blocks to support that
845  *	communication and coherency.
846  *
847  * CPUID LEAVES
848  *
849  * There are a few different CPUID leaves that we can use to try and understand
850  * the actual state of the world. As part of the introduction of family 0xf, AMD
851  * added CPUID leaf 0x80000008. This leaf tells us the number of logical
852  * processors that are in the system. Because families before Zen didn't have
853  * SMT, this was always the number of cores that were in the system. However, it
854  * should always be thought of as the number of logical threads to be consistent
855  * between generations. In addition we also get the size of the APIC ID that is
856  * used to represent the number of logical processors. This is important for
857  * deriving topology information.
858  *
859  * In the Bulldozer family, AMD added leaf 0x8000001E. The information varies a
860  * bit between Bulldozer and later families, but it is quite useful in
861  * determining the topology information. Because this information has changed
862  * across family generations, it's worth calling out what these mean
863  * explicitly. The registers have the following meanings:
864  *
865  *	%eax	The APIC ID. The entire register is defined to have a 32-bit
866  *		APIC ID, even though on systems without x2apic support, it will
867  *		be limited to 8 bits.
868  *
869  *	%ebx	On Bulldozer-era systems this contains information about the
870  *		number of cores that are in a compute unit (cores that share
871  *		resources). It also contains a per-package compute unit ID that
872  *		identifies which compute unit the logical CPU is a part of.
873  *
874  *		On Zen-era systems this instead contains the number of threads
875  *		per core and the ID of the core that the logical CPU is a part
876  *		of. Note, this ID is unique only to the package, it is not
877  *		globally unique across the entire system.
878  *
879  *	%ecx	This contains the number of nodes that exist in the package. It
880  *		also contains an ID that identifies which node the logical CPU
881  *		is a part of.
882  *
883  * Finally, we also use cpuid leaf 0x8000001D to determine information about the
884  * cache layout to determine which logical CPUs are sharing which caches.
885  *
886  * illumos Topology
887  * ----------------
888  *
889  * Based on the above we synthesize the information into several different
890  * variables that we store in the 'struct cpuid_info'. We'll go into the details
891  * of what each member is supposed to represent and their uniqueness. In
892  * general, there are two levels of uniqueness that we care about. We care about
893  * an ID that is globally unique. That means that it will be unique across all
894  * entities in the system. For example, the default logical CPU ID is globally
895  * unique. On the other hand, there is some information that we only care about
896  * being unique within the context of a single package / socket. Here are the
897  * variables that we keep track of and their meaning.
898  *
899  * Several of the values that are asking for an identifier, with the exception
900  * of cpi_apicid, are allowed to be synthetic.
901  *
902  *
903  * cpi_apicid
904  *
905  *	This is the value of the CPU's APIC id. This should be the full 32-bit
906  *	ID if the CPU is using the x2apic. Otherwise, it should be the 8-bit
907  *	APIC ID. This value is globally unique between all logical CPUs across
908  *	all packages. This is usually required by the APIC.
909  *
910  * cpi_chipid
911  *
912  *	This value indicates the ID of the package that the logical CPU is a
913  *	part of. This value is allowed to be synthetic. It is usually derived by
914  *	taking the CPU's APIC ID and determining how many bits are used to
915  *	represent CPU cores in the package. All logical CPUs that are part of
916  *	the same package must have the same value.
917  *
918  * cpi_coreid
919  *
920  *	This represents the ID of a CPU core. Two logical CPUs should only have
921  *	the same cpi_coreid value if they are part of the same core. These
922  *	values may be synthetic. On systems that support SMT, this value is
923  *	usually derived from the APIC ID, otherwise it is often synthetic and
924  *	just set to the value of the cpu_id in the cpu_t.
925  *
926  * cpi_pkgcoreid
927  *
928  *	This is similar to the cpi_coreid in that logical CPUs that are part of
929  *	the same core should have the same ID. The main difference is that these
930  *	values are only required to be unique to a given socket.
931  *
932  * cpi_clogid
933  *
934  *	This represents the logical ID of a logical CPU. This value should be
935  *	unique within a given socket for each logical CPU. This is allowed to be
936  *	synthetic, though it is usually based off of the CPU's apic ID. The
937  *	broader system expects that logical CPUs that have are part of the same
938  *	core have contiguous numbers. For example, if there were two threads per
939  *	core, then the core IDs divided by two should be the same and the first
940  *	modulus two should be zero and the second one. For example, IDs 4 and 5
941  *	indicate two logical CPUs that are part of the same core. But IDs 5 and
942  *	6 represent two logical CPUs that are part of different cores.
943  *
944  *	While it is common for the cpi_coreid and the cpi_clogid to be derived
945  *	from the same source, strictly speaking, they don't have to be and the
946  *	two values should be considered logically independent. One should not
947  *	try to compare a logical CPU's cpi_coreid and cpi_clogid to determine
948  *	some kind of relationship. While this is tempting, we've seen cases on
949  *	AMD family 0xf where the system's cpu id is not related to its APIC ID.
950  *
951  * cpi_ncpu_per_chip
952  *
953  *	This value indicates the total number of logical CPUs that exist in the
954  *	physical package. Critically, this is not the number of logical CPUs
955  *	that exist for just the single core.
956  *
957  *	This value should be the same for all logical CPUs in the same package.
958  *
959  * cpi_ncore_per_chip
960  *
961  *	This value indicates the total number of physical CPU cores that exist
962  *	in the package. The system compares this value with cpi_ncpu_per_chip to
963  *	determine if simultaneous multi-threading (SMT) is enabled. When
964  *	cpi_ncpu_per_chip equals cpi_ncore_per_chip, then there is no SMT and
965  *	the X86FSET_HTT feature is not set. If this value is greater than one,
966  *	than we consider the processor to have the feature X86FSET_CMP, to
967  *	indicate that there is support for more than one core.
968  *
969  *	This value should be the same for all logical CPUs in the same package.
970  *
971  * cpi_procnodes_per_pkg
972  *
973  *	This value indicates the number of 'nodes' that exist in the package.
974  *	When processors are actually a multi-chip module, this represents the
975  *	number of such modules that exist in the package. Currently, on Intel
976  *	based systems this member is always set to 1.
977  *
978  *	This value should be the same for all logical CPUs in the same package.
979  *
980  * cpi_procnodeid
981  *
982  *	This value indicates the ID of the node that the logical CPU is a part
983  *	of. All logical CPUs that are in the same node must have the same value
984  *	here. This value must be unique across all of the packages in the
985  *	system.  On Intel based systems, this is currently set to the value in
986  *	cpi_chipid because there is only one node.
987  *
988  * cpi_cores_per_compunit
989  *
990  *	This value indicates the number of cores that are part of a compute
991  *	unit. See the AMD topology section for this. This member only has real
992  *	meaning currently for AMD Bulldozer family processors. For all other
993  *	processors, this should currently be set to 1.
994  *
995  * cpi_compunitid
996  *
997  *	This indicates the compute unit that the logical CPU belongs to. For
998  *	processors without AMD Bulldozer-style compute units this should be set
999  *	to the value of cpi_coreid.
1000  *
1001  * cpi_ncpu_shr_last_cache
1002  *
1003  *	This indicates the number of logical CPUs that are sharing the same last
1004  *	level cache. This value should be the same for all CPUs that are sharing
1005  *	that cache. The last cache refers to the cache that is closest to memory
1006  *	and furthest away from the CPU.
1007  *
1008  * cpi_last_lvl_cacheid
1009  *
1010  *	This indicates the ID of the last cache that the logical CPU uses. This
1011  *	cache is often shared between multiple logical CPUs and is the cache
1012  *	that is closest to memory and furthest away from the CPU. This value
1013  *	should be the same for a group of logical CPUs only if they actually
1014  *	share the same last level cache. IDs should not overlap between
1015  *	packages.
1016  *
1017  * cpi_ncore_bits
1018  *
1019  *	This indicates the number of bits that are required to represent all of
1020  *	the cores in the system. As cores are derived based on their APIC IDs,
1021  *	we aren't guaranteed a run of APIC IDs starting from zero. It's OK for
1022  *	this value to be larger than the actual number of IDs that are present
1023  *	in the system. This is used to size tables by the CMI framework. It is
1024  *	only filled in for Intel and AMD CPUs.
1025  *
1026  * cpi_nthread_bits
1027  *
1028  *	This indicates the number of bits required to represent all of the IDs
1029  *	that cover the logical CPUs that exist on a given core. It's OK for this
1030  *	value to be larger than the actual number of IDs that are present in the
1031  *	system.  This is used to size tables by the CMI framework. It is
1032  *	only filled in for Intel and AMD CPUs.
1033  *
1034  * -----------
1035  * Hypervisors
1036  * -----------
1037  *
1038  * If trying to manage the differences between vendors wasn't bad enough, it can
1039  * get worse thanks to our friend hardware virtualization. Hypervisors are given
1040  * the ability to interpose on all cpuid instructions and change them to suit
1041  * their purposes. In general, this is necessary as the hypervisor wants to be
1042  * able to present a more uniform set of features or not necessarily give the
1043  * guest operating system kernel knowledge of all features so it can be
1044  * more easily migrated between systems.
1045  *
1046  * When it comes to trying to determine topology information, this can be a
1047  * double edged sword. When a hypervisor doesn't actually implement a cpuid
1048  * leaf, it'll often return all zeros. Because of that, you'll often see various
1049  * checks scattered about fields being non-zero before we assume we can use
1050  * them.
1051  *
1052  * When it comes to topology information, the hypervisor is often incentivized
1053  * to lie to you about topology. This is because it doesn't always actually
1054  * guarantee that topology at all. The topology path we take in the system
1055  * depends on how the CPU advertises itself. If it advertises itself as an Intel
1056  * or AMD CPU, then we basically do our normal path. However, when they don't
1057  * use an actual vendor, then that usually turns into multiple one-core CPUs
1058  * that we enumerate that are often on different sockets. The actual behavior
1059  * depends greatly on what the hypervisor actually exposes to us.
1060  *
1061  * --------------------
1062  * Exposing Information
1063  * --------------------
1064  *
1065  * We expose CPUID information in three different forms in the system.
1066  *
1067  * The first is through the x86_featureset variable. This is used in conjunction
1068  * with the is_x86_feature() function. This is queried by x86-specific functions
1069  * to determine which features are or aren't present in the system and to make
1070  * decisions based upon them. For example, users of this include everything from
1071  * parts of the system dedicated to reliability, availability, and
1072  * serviceability (RAS), to making decisions about how to handle security
1073  * mitigations, to various x86-specific drivers. General purpose or
1074  * architecture independent drivers should never be calling this function.
1075  *
1076  * The second means is through the auxiliary vector. The auxiliary vector is a
1077  * series of tagged data that the kernel passes down to a user program when it
1078  * begins executing. This information is used to indicate to programs what
1079  * instruction set extensions are present. For example, information about the
1080  * CPU supporting the machine check architecture (MCA) wouldn't be passed down
1081  * since user programs cannot make use of it. However, things like the AVX
1082  * instruction sets are. Programs use this information to make run-time
1083  * decisions about what features they should use. As an example, the run-time
1084  * link-editor (rtld) can relocate different functions depending on the hardware
1085  * support available.
1086  *
1087  * The final form is through a series of accessor functions that all have the
1088  * form cpuid_get*. This is used by a number of different subsystems in the
1089  * kernel to determine more detailed information about what we're running on,
1090  * topology information, etc. Some of these subsystems include processor groups
1091  * (uts/common/os/pg.c.), CPU Module Interface (uts/i86pc/os/cmi.c), ACPI,
1092  * microcode, and performance monitoring. These functions all ASSERT that the
1093  * CPU they're being called on has reached a certain cpuid pass. If the passes
1094  * are rearranged, then this needs to be adjusted.
1095  *
1096  * -----------------------------------------------
1097  * Speculative Execution CPU Side Channel Security
1098  * -----------------------------------------------
1099  *
1100  * With the advent of the Spectre and Meltdown attacks which exploit speculative
1101  * execution in the CPU to create side channels there have been a number of
1102  * different attacks and corresponding issues that the operating system needs to
1103  * mitigate against. The following list is some of the common, but not
1104  * exhaustive, set of issues that we know about and have done some or need to do
1105  * more work in the system to mitigate against:
1106  *
1107  *   - Spectre v1
1108  *   - swapgs (Spectre v1 variant)
1109  *   - Spectre v2
1110  *   - Meltdown (Spectre v3)
1111  *   - Rogue Register Read (Spectre v3a)
1112  *   - Speculative Store Bypass (Spectre v4)
1113  *   - ret2spec, SpectreRSB
1114  *   - L1 Terminal Fault (L1TF)
1115  *   - Microarchitectural Data Sampling (MDS)
1116  *
1117  * Each of these requires different sets of mitigations and has different attack
1118  * surfaces. For the most part, this discussion is about protecting the kernel
1119  * from non-kernel executing environments such as user processes and hardware
1120  * virtual machines. Unfortunately, there are a number of user vs. user
1121  * scenarios that exist with these. The rest of this section will describe the
1122  * overall approach that the system has taken to address these as well as their
1123  * shortcomings. Unfortunately, not all of the above have been handled today.
1124  *
1125  * SPECTRE v2, ret2spec, SpectreRSB
1126  *
1127  * The second variant of the spectre attack focuses on performing branch target
1128  * injection. This generally impacts indirect call instructions in the system.
1129  * There are four different ways to mitigate this issue that are commonly
1130  * described today:
1131  *
1132  *  1. Using Indirect Branch Restricted Speculation (IBRS).
1133  *  2. Using Retpolines and RSB Stuffing
1134  *  3. Using Enhanced Indirect Branch Restricted Speculation (eIBRS)
1135  *  4. Using Automated Indirect Branch Restricted Speculation (AIBRS)
1136  *
1137  * IBRS uses a feature added to microcode to restrict speculation, among other
1138  * things. This form of mitigation has not been used as it has been generally
1139  * seen as too expensive and requires reactivation upon various transitions in
1140  * the system.
1141  *
1142  * As a less impactful alternative to IBRS, retpolines were developed by
1143  * Google. These basically require one to replace indirect calls with a specific
1144  * trampoline that will cause speculation to fail and break the attack.
1145  * Retpolines require compiler support. We always build with retpolines in the
1146  * external thunk mode. This means that a traditional indirect call is replaced
1147  * with a call to one of the __x86_indirect_thunk_<reg> functions. A side effect
1148  * of this is that all indirect function calls are performed through a register.
1149  *
1150  * We have to use a common external location of the thunk and not inline it into
1151  * the callsite so that way we can have a single place to patch these functions.
1152  * As it turns out, we currently have two different forms of retpolines that
1153  * exist in the system:
1154  *
1155  *  1. A full retpoline
1156  *  2. A no-op version
1157  *
1158  * The first one is used in the general case. Historically, there was an
1159  * AMD-specific optimized retopoline variant that was based around using a
1160  * serializing lfence instruction; however, in March 2022 it was announced that
1161  * this was actually still vulnerable to Spectre v2 and therefore we no longer
1162  * use it and it is no longer available in the system.
1163  *
1164  * The third form described above is the most curious. It turns out that the way
1165  * that retpolines are implemented is that they rely on how speculation is
1166  * performed on a 'ret' instruction. Intel has continued to optimize this
1167  * process (which is partly why we need to have return stack buffer stuffing,
1168  * but more on that in a bit) and in processors starting with Cascade Lake
1169  * on the server side, it's dangerous to rely on retpolines. Instead, a new
1170  * mechanism has been introduced called Enhanced IBRS (eIBRS).
1171  *
1172  * Unlike IBRS, eIBRS is designed to be enabled once at boot and left on each
1173  * physical core. However, if this is the case, we don't want to use retpolines
1174  * any more. Therefore if eIBRS is present, we end up turning each retpoline
1175  * function (called a thunk) into a jmp instruction. This means that we're still
1176  * paying the cost of an extra jump to the external thunk, but it gives us
1177  * flexibility and the ability to have a single kernel image that works across a
1178  * wide variety of systems and hardware features.
1179  *
1180  * Unfortunately, this alone is insufficient. First, Skylake systems have
1181  * additional speculation for the Return Stack Buffer (RSB) which is used to
1182  * return from call instructions which retpolines take advantage of. However,
1183  * this problem is not just limited to Skylake and is actually more pernicious.
1184  * The SpectreRSB paper introduces several more problems that can arise with
1185  * dealing with this. The RSB can be poisoned just like the indirect branch
1186  * predictor. This means that one needs to clear the RSB when transitioning
1187  * between two different privilege domains. Some examples include:
1188  *
1189  *  - Switching between two different user processes
1190  *  - Going between user land and the kernel
1191  *  - Returning to the kernel from a hardware virtual machine
1192  *
1193  * Mitigating this involves combining a couple of different things. The first is
1194  * SMEP (supervisor mode execution protection) which was introduced in Ivy
1195  * Bridge. When an RSB entry refers to a user address and we're executing in the
1196  * kernel, speculation through it will be stopped when SMEP is enabled. This
1197  * protects against a number of the different cases that we would normally be
1198  * worried about such as when we enter the kernel from user land.
1199  *
1200  * To prevent against additional manipulation of the RSB from other contexts
1201  * such as a non-root VMX context attacking the kernel we first look to
1202  * enhanced IBRS. When eIBRS is present and enabled, then there should be
1203  * nothing else that we need to do to protect the kernel at this time.
1204  *
1205  * Unfortunately, eIBRS or not, we need to manually overwrite the contents of
1206  * the return stack buffer. We do this through the x86_rsb_stuff() function.
1207  * Currently this is employed on context switch and vmx_exit. The
1208  * x86_rsb_stuff() function is disabled only when mitigations in general are.
1209  *
1210  * If SMEP is not present, then we would have to stuff the RSB every time we
1211  * transitioned from user mode to the kernel, which isn't very practical right
1212  * now.
1213  *
1214  * To fully protect user to user and vmx to vmx attacks from these classes of
1215  * issues, we would also need to allow them to opt into performing an Indirect
1216  * Branch Prediction Barrier (IBPB) on switch. This is not currently wired up.
1217  *
1218  * The fourth form of mitigation here is specific to AMD and is called Automated
1219  * IBRS (AIBRS). This is similar in spirit to eIBRS; however rather than set the
1220  * IBRS bit in MSR_IA32_SPEC_CTRL (0x48) we instead set a bit in the EFER
1221  * (extended feature enable register) MSR. This bit basically says that IBRS
1222  * acts as though it is always active when executing at CPL0 and when executing
1223  * in the 'host' context when SEV-SNP is enabled.
1224  *
1225  * When this is active, AMD states that the RSB is cleared on VMEXIT and
1226  * therefore it is unnecessary. While this handles RSB stuffing attacks from SVM
1227  * to the kernel, we must still consider the remaining cases that exist, just
1228  * like above. While traditionally AMD employed a 32 entry RSB allowing the
1229  * traditional technique to work, this is not true on all CPUs. While a write to
1230  * IBRS would clear the RSB if the processor supports more than 32 entries (but
1231  * not otherwise), AMD states that as long as at leat a single 4 KiB unmapped
1232  * guard page is present between user and kernel address spaces and SMEP is
1233  * enabled, then there is no need to clear the RSB at all.
1234  *
1235  * By default, the system will enable RSB stuffing and the required variant of
1236  * retpolines and store that information in the x86_spectrev2_mitigation value.
1237  * This will be evaluated after a microcode update as well, though it is
1238  * expected that microcode updates will not take away features. This may mean
1239  * that a late loaded microcode may not end up in the optimal configuration
1240  * (though this should be rare).
1241  *
1242  * Currently we do not build kmdb with retpolines or perform any additional side
1243  * channel security mitigations for it. One complication with kmdb is that it
1244  * requires its own retpoline thunks and it would need to adjust itself based on
1245  * what the kernel does. The threat model of kmdb is more limited and therefore
1246  * it may make more sense to investigate using prediction barriers as the whole
1247  * system is only executing a single instruction at a time while in kmdb.
1248  *
1249  * SPECTRE v1, v4
1250  *
1251  * The v1 and v4 variants of spectre are not currently mitigated in the
1252  * system and require other classes of changes to occur in the code.
1253  *
1254  * SPECTRE v1 (SWAPGS VARIANT)
1255  *
1256  * The class of Spectre v1 vulnerabilities aren't all about bounds checks, but
1257  * can generally affect any branch-dependent code. The swapgs issue is one
1258  * variant of this. If we are coming in from userspace, we can have code like
1259  * this:
1260  *
1261  *	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
1262  *	je	1f
1263  *	movq	$0, REGOFF_SAVFP(%rsp)
1264  *	swapgs
1265  *	1:
1266  *	movq	%gs:CPU_THREAD, %rax
1267  *
1268  * If an attacker can cause a mis-speculation of the branch here, we could skip
1269  * the needed swapgs, and use the /user/ %gsbase as the base of the %gs-based
1270  * load. If subsequent code can act as the usual Spectre cache gadget, this
1271  * would potentially allow KPTI bypass. To fix this, we need an lfence prior to
1272  * any use of the %gs override.
1273  *
1274  * The other case is also an issue: if we're coming into a trap from kernel
1275  * space, we could mis-speculate and swapgs the user %gsbase back in prior to
1276  * using it. AMD systems are not vulnerable to this version, as a swapgs is
1277  * serializing with respect to subsequent uses. But as AMD /does/ need the other
1278  * case, and the fix is the same in both cases (an lfence at the branch target
1279  * 1: in this example), we'll just do it unconditionally.
1280  *
1281  * Note that we don't enable user-space "wrgsbase" via CR4_FSGSBASE, making it
1282  * harder for user-space to actually set a useful %gsbase value: although it's
1283  * not clear, it might still be feasible via lwp_setprivate(), though, so we
1284  * mitigate anyway.
1285  *
1286  * MELTDOWN
1287  *
1288  * Meltdown, or spectre v3, allowed a user process to read any data in their
1289  * address space regardless of whether or not the page tables in question
1290  * allowed the user to have the ability to read them. The solution to meltdown
1291  * is kernel page table isolation. In this world, there are two page tables that
1292  * are used for a process, one in user land and one in the kernel. To implement
1293  * this we use per-CPU page tables and switch between the user and kernel
1294  * variants when entering and exiting the kernel.  For more information about
1295  * this process and how the trampolines work, please see the big theory
1296  * statements and additional comments in:
1297  *
1298  *  - uts/i86pc/ml/kpti_trampolines.s
1299  *  - uts/i86pc/vm/hat_i86.c
1300  *
1301  * While Meltdown only impacted Intel systems and there are also Intel systems
1302  * that have Meltdown fixed (called Rogue Data Cache Load), we always have
1303  * kernel page table isolation enabled. While this may at first seem weird, an
1304  * important thing to remember is that you can't speculatively read an address
1305  * if it's never in your page table at all. Having user processes without kernel
1306  * pages present provides us with an important layer of defense in the kernel
1307  * against any other side channel attacks that exist and have yet to be
1308  * discovered. As such, kernel page table isolation (KPTI) is always enabled by
1309  * default, no matter the x86 system.
1310  *
1311  * L1 TERMINAL FAULT
1312  *
1313  * L1 Terminal Fault (L1TF) takes advantage of an issue in how speculative
1314  * execution uses page table entries. Effectively, it is two different problems.
1315  * The first is that it ignores the not present bit in the page table entries
1316  * when performing speculative execution. This means that something can
1317  * speculatively read the listed physical address if it's present in the L1
1318  * cache under certain conditions (see Intel's documentation for the full set of
1319  * conditions). Secondly, this can be used to bypass hardware virtualization
1320  * extended page tables (EPT) that are part of Intel's hardware virtual machine
1321  * instructions.
1322  *
1323  * For the non-hardware virtualized case, this is relatively easy to deal with.
1324  * We must make sure that all unmapped pages have an address of zero. This means
1325  * that they could read the first 4k of physical memory; however, we never use
1326  * that first page in the operating system and always skip putting it in our
1327  * memory map, even if firmware tells us we can use it in our memory map. While
1328  * other systems try to put extra metadata in the address and reserved bits,
1329  * which led to this being problematic in those cases, we do not.
1330  *
1331  * For hardware virtual machines things are more complicated. Because they can
1332  * construct their own page tables, it isn't hard for them to perform this
1333  * attack against any physical address. The one wrinkle is that this physical
1334  * address must be in the L1 data cache. Thus Intel added an MSR that we can use
1335  * to flush the L1 data cache. We wrap this up in the function
1336  * spec_uarch_flush(). This function is also used in the mitigation of
1337  * microarchitectural data sampling (MDS) discussed later on. Kernel based
1338  * hypervisors such as KVM or bhyve are responsible for performing this before
1339  * entering the guest.
1340  *
1341  * Because this attack takes place in the L1 cache, there's another wrinkle
1342  * here. The L1 cache is shared between all logical CPUs in a core in most Intel
1343  * designs. This means that when a thread enters a hardware virtualized context
1344  * and flushes the L1 data cache, the other thread on the processor may then go
1345  * ahead and put new data in it that can be potentially attacked. While one
1346  * solution is to disable SMT on the system, another option that is available is
1347  * to use a feature for hardware virtualization called 'SMT exclusion'. This
1348  * goes through and makes sure that if a HVM is being scheduled on one thread,
1349  * then the thing on the other thread is from the same hardware virtual machine.
1350  * If an interrupt comes in or the guest exits to the broader system, then the
1351  * other SMT thread will be kicked out.
1352  *
1353  * L1TF can be fully mitigated by hardware. If the RDCL_NO feature is set in the
1354  * architecture capabilities MSR (MSR_IA32_ARCH_CAPABILITIES), then we will not
1355  * perform L1TF related mitigations.
1356  *
1357  * MICROARCHITECTURAL DATA SAMPLING
1358  *
1359  * Microarchitectural data sampling (MDS) is a combination of four discrete
1360  * vulnerabilities that are similar issues affecting various parts of the CPU's
1361  * microarchitectural implementation around load, store, and fill buffers.
1362  * Specifically it is made up of the following subcomponents:
1363  *
1364  *  1. Microarchitectural Store Buffer Data Sampling (MSBDS)
1365  *  2. Microarchitectural Fill Buffer Data Sampling (MFBDS)
1366  *  3. Microarchitectural Load Port Data Sampling (MLPDS)
1367  *  4. Microarchitectural Data Sampling Uncacheable Memory (MDSUM)
1368  *
1369  * To begin addressing these, Intel has introduced another feature in microcode
1370  * called MD_CLEAR. This changes the verw instruction to operate in a different
1371  * way. This allows us to execute the verw instruction in a particular way to
1372  * flush the state of the affected parts. The L1TF L1D flush mechanism is also
1373  * updated when this microcode is present to flush this state.
1374  *
1375  * Primarily we need to flush this state whenever we transition from the kernel
1376  * to a less privileged context such as user mode or an HVM guest. MSBDS is a
1377  * little bit different. Here the structures are statically sized when a logical
1378  * CPU is in use and resized when it goes to sleep. Therefore, we also need to
1379  * flush the microarchitectural state before the CPU goes idles by calling hlt,
1380  * mwait, or another ACPI method. To perform these flushes, we call
1381  * x86_md_clear() at all of these transition points.
1382  *
1383  * If hardware enumerates RDCL_NO, indicating that it is not vulnerable to L1TF,
1384  * then we change the spec_uarch_flush() function to point to x86_md_clear(). If
1385  * MDS_NO has been set, then this is fully mitigated and x86_md_clear() becomes
1386  * a no-op.
1387  *
1388  * Unfortunately, with this issue hyperthreading rears its ugly head. In
1389  * particular, everything we've discussed above is only valid for a single
1390  * thread executing on a core. In the case where you have hyper-threading
1391  * present, this attack can be performed between threads. The theoretical fix
1392  * for this is to ensure that both threads are always in the same security
1393  * domain. This means that they are executing in the same ring and mutually
1394  * trust each other. Practically speaking, this would mean that a system call
1395  * would have to issue an inter-processor interrupt (IPI) to the other thread.
1396  * Rather than implement this, we recommend that one disables hyper-threading
1397  * through the use of psradm -aS.
1398  *
1399  * TSX ASYNCHRONOUS ABORT
1400  *
1401  * TSX Asynchronous Abort (TAA) is another side-channel vulnerability that
1402  * behaves like MDS, but leverages Intel's transactional instructions as another
1403  * vector. Effectively, when a transaction hits one of these cases (unmapped
1404  * page, various cache snoop activity, etc.) then the same data can be exposed
1405  * as in the case of MDS. This means that you can attack your twin.
1406  *
1407  * Intel has described that there are two different ways that we can mitigate
1408  * this problem on affected processors:
1409  *
1410  *   1) We can use the same techniques used to deal with MDS. Flushing the
1411  *      microarchitectural buffers and disabling hyperthreading will mitigate
1412  *      this in the same way.
1413  *
1414  *   2) Using microcode to disable TSX.
1415  *
1416  * Now, most processors that are subject to MDS (as in they don't have MDS_NO in
1417  * the IA32_ARCH_CAPABILITIES MSR) will not receive microcode to disable TSX.
1418  * That's OK as we're already doing all such mitigations. On the other hand,
1419  * processors with MDS_NO are all supposed to receive microcode updates that
1420  * enumerate support for disabling TSX. In general, we'd rather use this method
1421  * when available as it doesn't require disabling hyperthreading to be
1422  * effective. Currently we basically are relying on microcode for processors
1423  * that enumerate MDS_NO.
1424  *
1425  * The microcode features are enumerated as part of the IA32_ARCH_CAPABILITIES.
1426  * When bit 7 (IA32_ARCH_CAP_TSX_CTRL) is present, then we are given two
1427  * different powers. The first allows us to cause all transactions to
1428  * immediately abort. The second gives us a means of disabling TSX completely,
1429  * which includes removing it from cpuid. If we have support for this in
1430  * microcode during the first cpuid pass, then we'll disable TSX completely such
1431  * that user land never has a chance to observe the bit. However, if we are late
1432  * loading the microcode, then we must use the functionality to cause
1433  * transactions to automatically abort. This is necessary for user land's sake.
1434  * Once a program sees a cpuid bit, it must not be taken away.
1435  *
1436  * We track whether or not we should do this based on what cpuid pass we're in.
1437  * Whenever we hit cpuid_scan_security() on the boot CPU and we're still on pass
1438  * 1 of the cpuid logic, then we can completely turn off TSX. Notably this
1439  * should happen twice. Once in the normal cpuid_pass_basic() code and then a
1440  * second time after we do the initial microcode update.  As a result we need to
1441  * be careful in cpuid_apply_tsx() to only use the MSR if we've loaded a
1442  * suitable microcode on the current CPU (which happens prior to
1443  * cpuid_pass_ucode()).
1444  *
1445  * If TAA has been fixed, then it will be enumerated in IA32_ARCH_CAPABILITIES
1446  * as TAA_NO. In such a case, we will still disable TSX: it's proven to be an
1447  * unfortunate feature in a number of ways, and taking the opportunity to
1448  * finally be able to turn it off is likely to be of benefit in the future.
1449  *
1450  * SUMMARY
1451  *
1452  * The following table attempts to summarize the mitigations for various issues
1453  * and what's done in various places:
1454  *
1455  *  - Spectre v1: Not currently mitigated
1456  *  - swapgs: lfences after swapgs paths
1457  *  - Spectre v2: Retpolines/RSB Stuffing or eIBRS/AIBRS if HW support
1458  *  - Meltdown: Kernel Page Table Isolation
1459  *  - Spectre v3a: Updated CPU microcode
1460  *  - Spectre v4: Not currently mitigated
1461  *  - SpectreRSB: SMEP and RSB Stuffing
1462  *  - L1TF: spec_uarch_flush, SMT exclusion, requires microcode
1463  *  - MDS: x86_md_clear, requires microcode, disabling SMT
1464  *  - TAA: x86_md_clear and disabling SMT OR microcode and disabling TSX
1465  *
1466  * The following table indicates the x86 feature set bits that indicate that a
1467  * given problem has been solved or a notable feature is present:
1468  *
1469  *  - RDCL_NO: Meltdown, L1TF, MSBDS subset of MDS
1470  *  - MDS_NO: All forms of MDS
1471  *  - TAA_NO: TAA
1472  */
1473 
1474 #include <sys/types.h>
1475 #include <sys/archsystm.h>
1476 #include <sys/x86_archext.h>
1477 #include <sys/kmem.h>
1478 #include <sys/systm.h>
1479 #include <sys/cmn_err.h>
1480 #include <sys/sunddi.h>
1481 #include <sys/sunndi.h>
1482 #include <sys/cpuvar.h>
1483 #include <sys/processor.h>
1484 #include <sys/sysmacros.h>
1485 #include <sys/pg.h>
1486 #include <sys/fp.h>
1487 #include <sys/controlregs.h>
1488 #include <sys/bitmap.h>
1489 #include <sys/auxv_386.h>
1490 #include <sys/memnode.h>
1491 #include <sys/pci_cfgspace.h>
1492 #include <sys/comm_page.h>
1493 #include <sys/mach_mmu.h>
1494 #include <sys/ucode.h>
1495 #include <sys/tsc.h>
1496 #include <sys/kobj.h>
1497 #include <sys/asm_misc.h>
1498 #include <sys/bitmap.h>
1499 
1500 #ifdef __xpv
1501 #include <sys/hypervisor.h>
1502 #else
1503 #include <sys/ontrap.h>
1504 #endif
1505 
1506 uint_t x86_vendor = X86_VENDOR_IntelClone;
1507 uint_t x86_type = X86_TYPE_OTHER;
1508 uint_t x86_clflush_size = 0;
1509 
1510 #if defined(__xpv)
1511 int x86_use_pcid = 0;
1512 int x86_use_invpcid = 0;
1513 #else
1514 int x86_use_pcid = -1;
1515 int x86_use_invpcid = -1;
1516 #endif
1517 
1518 typedef enum {
1519 	X86_SPECTREV2_RETPOLINE,
1520 	X86_SPECTREV2_ENHANCED_IBRS,
1521 	X86_SPECTREV2_AUTO_IBRS,
1522 	X86_SPECTREV2_DISABLED
1523 } x86_spectrev2_mitigation_t;
1524 
1525 uint_t x86_disable_spectrev2 = 0;
1526 static x86_spectrev2_mitigation_t x86_spectrev2_mitigation =
1527     X86_SPECTREV2_RETPOLINE;
1528 
1529 /*
1530  * The mitigation status for TAA:
1531  * X86_TAA_NOTHING -- no mitigation available for TAA side-channels
1532  * X86_TAA_DISABLED -- mitigation disabled via x86_disable_taa
1533  * X86_TAA_MD_CLEAR -- MDS mitigation also suffices for TAA
1534  * X86_TAA_TSX_FORCE_ABORT -- transactions are forced to abort
1535  * X86_TAA_TSX_DISABLE -- force abort transactions and hide from CPUID
1536  * X86_TAA_HW_MITIGATED -- TSX potentially active but H/W not TAA-vulnerable
1537  */
1538 typedef enum {
1539 	X86_TAA_NOTHING,
1540 	X86_TAA_DISABLED,
1541 	X86_TAA_MD_CLEAR,
1542 	X86_TAA_TSX_FORCE_ABORT,
1543 	X86_TAA_TSX_DISABLE,
1544 	X86_TAA_HW_MITIGATED
1545 } x86_taa_mitigation_t;
1546 
1547 uint_t x86_disable_taa = 0;
1548 static x86_taa_mitigation_t x86_taa_mitigation = X86_TAA_NOTHING;
1549 
1550 uint_t pentiumpro_bug4046376;
1551 
1552 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
1553 
1554 static char *x86_feature_names[NUM_X86_FEATURES] = {
1555 	"lgpg",
1556 	"tsc",
1557 	"msr",
1558 	"mtrr",
1559 	"pge",
1560 	"de",
1561 	"cmov",
1562 	"mmx",
1563 	"mca",
1564 	"pae",
1565 	"cv8",
1566 	"pat",
1567 	"sep",
1568 	"sse",
1569 	"sse2",
1570 	"htt",
1571 	"asysc",
1572 	"nx",
1573 	"sse3",
1574 	"cx16",
1575 	"cmp",
1576 	"tscp",
1577 	"mwait",
1578 	"sse4a",
1579 	"cpuid",
1580 	"ssse3",
1581 	"sse4_1",
1582 	"sse4_2",
1583 	"1gpg",
1584 	"clfsh",
1585 	"64",
1586 	"aes",
1587 	"pclmulqdq",
1588 	"xsave",
1589 	"avx",
1590 	"vmx",
1591 	"svm",
1592 	"topoext",
1593 	"f16c",
1594 	"rdrand",
1595 	"x2apic",
1596 	"avx2",
1597 	"bmi1",
1598 	"bmi2",
1599 	"fma",
1600 	"smep",
1601 	"smap",
1602 	"adx",
1603 	"rdseed",
1604 	"mpx",
1605 	"avx512f",
1606 	"avx512dq",
1607 	"avx512pf",
1608 	"avx512er",
1609 	"avx512cd",
1610 	"avx512bw",
1611 	"avx512vl",
1612 	"avx512fma",
1613 	"avx512vbmi",
1614 	"avx512_vpopcntdq",
1615 	"avx512_4vnniw",
1616 	"avx512_4fmaps",
1617 	"xsaveopt",
1618 	"xsavec",
1619 	"xsaves",
1620 	"sha",
1621 	"umip",
1622 	"pku",
1623 	"ospke",
1624 	"pcid",
1625 	"invpcid",
1626 	"ibrs",
1627 	"ibpb",
1628 	"stibp",
1629 	"ssbd",
1630 	"ssbd_virt",
1631 	"rdcl_no",
1632 	"ibrs_all",
1633 	"rsba",
1634 	"ssb_no",
1635 	"stibp_all",
1636 	"flush_cmd",
1637 	"l1d_vmentry_no",
1638 	"fsgsbase",
1639 	"clflushopt",
1640 	"clwb",
1641 	"monitorx",
1642 	"clzero",
1643 	"xop",
1644 	"fma4",
1645 	"tbm",
1646 	"avx512_vnni",
1647 	"amd_pcec",
1648 	"md_clear",
1649 	"mds_no",
1650 	"core_thermal",
1651 	"pkg_thermal",
1652 	"tsx_ctrl",
1653 	"taa_no",
1654 	"ppin",
1655 	"vaes",
1656 	"vpclmulqdq",
1657 	"lfence_serializing",
1658 	"gfni",
1659 	"avx512_vp2intersect",
1660 	"avx512_bitalg",
1661 	"avx512_vbmi2",
1662 	"avx512_bf16",
1663 	"auto_ibrs"
1664 };
1665 
1666 boolean_t
1667 is_x86_feature(void *featureset, uint_t feature)
1668 {
1669 	ASSERT(feature < NUM_X86_FEATURES);
1670 	return (BT_TEST((ulong_t *)featureset, feature));
1671 }
1672 
1673 void
1674 add_x86_feature(void *featureset, uint_t feature)
1675 {
1676 	ASSERT(feature < NUM_X86_FEATURES);
1677 	BT_SET((ulong_t *)featureset, feature);
1678 }
1679 
1680 void
1681 remove_x86_feature(void *featureset, uint_t feature)
1682 {
1683 	ASSERT(feature < NUM_X86_FEATURES);
1684 	BT_CLEAR((ulong_t *)featureset, feature);
1685 }
1686 
1687 boolean_t
1688 compare_x86_featureset(void *setA, void *setB)
1689 {
1690 	/*
1691 	 * We assume that the unused bits of the bitmap are always zero.
1692 	 */
1693 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
1694 		return (B_TRUE);
1695 	} else {
1696 		return (B_FALSE);
1697 	}
1698 }
1699 
1700 void
1701 print_x86_featureset(void *featureset)
1702 {
1703 	uint_t i;
1704 
1705 	for (i = 0; i < NUM_X86_FEATURES; i++) {
1706 		if (is_x86_feature(featureset, i)) {
1707 			cmn_err(CE_CONT, "?x86_feature: %s\n",
1708 			    x86_feature_names[i]);
1709 		}
1710 	}
1711 }
1712 
1713 /* Note: This is the maximum size for the CPU, not the size of the structure. */
1714 static size_t xsave_state_size = 0;
1715 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
1716 boolean_t xsave_force_disable = B_FALSE;
1717 extern int disable_smap;
1718 
1719 /*
1720  * This is set to platform type we are running on.
1721  */
1722 static int platform_type = -1;
1723 
1724 #if !defined(__xpv)
1725 /*
1726  * Variable to patch if hypervisor platform detection needs to be
1727  * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
1728  */
1729 int enable_platform_detection = 1;
1730 #endif
1731 
1732 /*
1733  * monitor/mwait info.
1734  *
1735  * size_actual and buf_actual are the real address and size allocated to get
1736  * proper mwait_buf alignement.  buf_actual and size_actual should be passed
1737  * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
1738  * processor cache-line alignment, but this is not guarantied in the furture.
1739  */
1740 struct mwait_info {
1741 	size_t		mon_min;	/* min size to avoid missed wakeups */
1742 	size_t		mon_max;	/* size to avoid false wakeups */
1743 	size_t		size_actual;	/* size actually allocated */
1744 	void		*buf_actual;	/* memory actually allocated */
1745 	uint32_t	support;	/* processor support of monitor/mwait */
1746 };
1747 
1748 /*
1749  * xsave/xrestor info.
1750  *
1751  * This structure contains HW feature bits and the size of the xsave save area.
1752  * Note: the kernel declares a fixed size (AVX_XSAVE_SIZE) structure
1753  * (xsave_state) to describe the xsave layout. However, at runtime the
1754  * per-lwp xsave area is dynamically allocated based on xsav_max_size. The
1755  * xsave_state structure simply represents the legacy layout of the beginning
1756  * of the xsave area.
1757  */
1758 struct xsave_info {
1759 	uint32_t	xsav_hw_features_low;   /* Supported HW features */
1760 	uint32_t	xsav_hw_features_high;  /* Supported HW features */
1761 	size_t		xsav_max_size;  /* max size save area for HW features */
1762 	size_t		ymm_size;	/* AVX: size of ymm save area */
1763 	size_t		ymm_offset;	/* AVX: offset for ymm save area */
1764 	size_t		bndregs_size;	/* MPX: size of bndregs save area */
1765 	size_t		bndregs_offset;	/* MPX: offset for bndregs save area */
1766 	size_t		bndcsr_size;	/* MPX: size of bndcsr save area */
1767 	size_t		bndcsr_offset;	/* MPX: offset for bndcsr save area */
1768 	size_t		opmask_size;	/* AVX512: size of opmask save */
1769 	size_t		opmask_offset;	/* AVX512: offset for opmask save */
1770 	size_t		zmmlo_size;	/* AVX512: size of zmm 256 save */
1771 	size_t		zmmlo_offset;	/* AVX512: offset for zmm 256 save */
1772 	size_t		zmmhi_size;	/* AVX512: size of zmm hi reg save */
1773 	size_t		zmmhi_offset;	/* AVX512: offset for zmm hi reg save */
1774 };
1775 
1776 
1777 /*
1778  * These constants determine how many of the elements of the
1779  * cpuid we cache in the cpuid_info data structure; the
1780  * remaining elements are accessible via the cpuid instruction.
1781  */
1782 
1783 #define	NMAX_CPI_STD	8		/* eax = 0 .. 7 */
1784 #define	NMAX_CPI_EXTD	0x22		/* eax = 0x80000000 .. 0x80000021 */
1785 #define	NMAX_CPI_TOPO	0x10		/* Sanity check on leaf 8X26, 1F */
1786 
1787 /*
1788  * See the big theory statement for a more detailed explanation of what some of
1789  * these members mean.
1790  */
1791 struct cpuid_info {
1792 	uint_t cpi_pass;		/* last pass completed */
1793 	/*
1794 	 * standard function information
1795 	 */
1796 	uint_t cpi_maxeax;		/* fn 0: %eax */
1797 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
1798 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
1799 
1800 	uint_t cpi_family;		/* fn 1: extended family */
1801 	uint_t cpi_model;		/* fn 1: extended model */
1802 	uint_t cpi_step;		/* fn 1: stepping */
1803 	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
1804 					/*		AMD: package/socket # */
1805 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
1806 	int cpi_clogid;			/* fn 1: %ebx: thread # */
1807 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
1808 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
1809 	uint_t cpi_ncache;		/* fn 2: number of elements */
1810 	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
1811 	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
1812 	uint_t cpi_cache_leaf_size;	/* Number of cache elements */
1813 					/* Intel fn: 4, AMD fn: 8000001d */
1814 	struct cpuid_regs **cpi_cache_leaves;	/* Actual leaves from above */
1815 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 7 */
1816 	struct cpuid_regs cpi_sub7[1];	/* Leaf 7, sub-leaf 1 */
1817 	/*
1818 	 * extended function information
1819 	 */
1820 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
1821 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
1822 	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
1823 	uint8_t	cpi_vabits;		/* fn 0x80000006: %eax */
1824 	uint8_t cpi_fp_amd_save;	/* AMD: FP error pointer save rqd. */
1825 	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */
1826 
1827 	id_t cpi_coreid;		/* same coreid => strands share core */
1828 	int cpi_pkgcoreid;		/* core number within single package */
1829 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
1830 					/* Intel: fn 4: %eax[31-26] */
1831 
1832 	/*
1833 	 * These values represent the number of bits that are required to store
1834 	 * information about the number of cores and threads.
1835 	 */
1836 	uint_t cpi_ncore_bits;
1837 	uint_t cpi_nthread_bits;
1838 	/*
1839 	 * supported feature information
1840 	 */
1841 	uint32_t cpi_support[6];
1842 #define	STD_EDX_FEATURES	0
1843 #define	AMD_EDX_FEATURES	1
1844 #define	TM_EDX_FEATURES		2
1845 #define	STD_ECX_FEATURES	3
1846 #define	AMD_ECX_FEATURES	4
1847 #define	STD_EBX_FEATURES	5
1848 	/*
1849 	 * Synthesized information, where known.
1850 	 */
1851 	x86_chiprev_t cpi_chiprev;	/* See X86_CHIPREV_* in x86_archext.h */
1852 	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
1853 	uint32_t cpi_socket;		/* Chip package/socket type */
1854 	x86_uarchrev_t cpi_uarchrev;	/* Microarchitecture and revision */
1855 
1856 	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
1857 	uint32_t cpi_apicid;
1858 	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
1859 	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
1860 					/* Intel: 1 */
1861 	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
1862 	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */
1863 
1864 	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
1865 
1866 	/*
1867 	 * AMD and Intel extended topology information. Leaf 8X26 (AMD) and
1868 	 * eventually leaf 0x1F (Intel).
1869 	 */
1870 	uint_t cpi_topo_nleaves;
1871 	struct cpuid_regs cpi_topo[NMAX_CPI_TOPO];
1872 };
1873 
1874 
1875 static struct cpuid_info cpuid_info0;
1876 
1877 /*
1878  * These bit fields are defined by the Intel Application Note AP-485
1879  * "Intel Processor Identification and the CPUID Instruction"
1880  */
1881 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
1882 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
1883 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
1884 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
1885 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
1886 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
1887 
1888 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
1889 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
1890 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
1891 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
1892 #define	CPI_FEATURES_7_0_EBX(cpi)	((cpi)->cpi_std[7].cp_ebx)
1893 #define	CPI_FEATURES_7_0_ECX(cpi)	((cpi)->cpi_std[7].cp_ecx)
1894 #define	CPI_FEATURES_7_0_EDX(cpi)	((cpi)->cpi_std[7].cp_edx)
1895 #define	CPI_FEATURES_7_1_EAX(cpi)	((cpi)->cpi_sub7[0].cp_eax)
1896 
1897 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
1898 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
1899 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
1900 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
1901 
1902 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
1903 #define	CPI_XMAXEAX_MAX		0x80000100
1904 #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
1905 #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
1906 
1907 /*
1908  * Function 4 (Deterministic Cache Parameters) macros
1909  * Defined by Intel Application Note AP-485
1910  */
1911 #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
1912 #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
1913 #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
1914 #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
1915 #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
1916 #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
1917 #define	CPI_CACHE_TYPE_DONE	0
1918 #define	CPI_CACHE_TYPE_DATA	1
1919 #define	CPI_CACHE_TYPE_INSTR	2
1920 #define	CPI_CACHE_TYPE_UNIFIED	3
1921 #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
1922 
1923 #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
1924 #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
1925 #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
1926 
1927 #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
1928 
1929 #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
1930 
1931 
1932 /*
1933  * A couple of shorthand macros to identify "later" P6-family chips
1934  * like the Pentium M and Core.  First, the "older" P6-based stuff
1935  * (loosely defined as "pre-Pentium-4"):
1936  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
1937  */
1938 #define	IS_LEGACY_P6(cpi) (			\
1939 	cpi->cpi_family == 6 &&			\
1940 		(cpi->cpi_model == 1 ||		\
1941 		cpi->cpi_model == 3 ||		\
1942 		cpi->cpi_model == 5 ||		\
1943 		cpi->cpi_model == 6 ||		\
1944 		cpi->cpi_model == 7 ||		\
1945 		cpi->cpi_model == 8 ||		\
1946 		cpi->cpi_model == 0xA ||	\
1947 		cpi->cpi_model == 0xB)		\
1948 )
1949 
1950 /* A "new F6" is everything with family 6 that's not the above */
1951 #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
1952 
1953 /* Extended family/model support */
1954 #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
1955 	cpi->cpi_family >= 0xf)
1956 
1957 /*
1958  * Info for monitor/mwait idle loop.
1959  *
1960  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
1961  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
1962  * 2006.
1963  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
1964  * Documentation Updates" #33633, Rev 2.05, December 2006.
1965  */
1966 #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
1967 #define	MWAIT_EXTENSIONS	(0x00000002)	/* extenstion supported */
1968 #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
1969 #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
1970 #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
1971 #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
1972 #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
1973 #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
1974 /*
1975  * Number of sub-cstates for a given c-state.
1976  */
1977 #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
1978 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
1979 
1980 /*
1981  * XSAVE leaf 0xD enumeration
1982  */
1983 #define	CPUID_LEAFD_2_YMM_OFFSET	576
1984 #define	CPUID_LEAFD_2_YMM_SIZE		256
1985 
1986 /*
1987  * Common extended leaf names to cut down on typos.
1988  */
1989 #define	CPUID_LEAF_EXT_0		0x80000000
1990 #define	CPUID_LEAF_EXT_8		0x80000008
1991 #define	CPUID_LEAF_EXT_1d		0x8000001d
1992 #define	CPUID_LEAF_EXT_1e		0x8000001e
1993 #define	CPUID_LEAF_EXT_21		0x80000021
1994 #define	CPUID_LEAF_EXT_26		0x80000026
1995 
1996 /*
1997  * Functions we consume from cpuid_subr.c;  don't publish these in a header
1998  * file to try and keep people using the expected cpuid_* interfaces.
1999  */
2000 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
2001 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
2002 extern x86_chiprev_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
2003 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
2004 extern x86_uarchrev_t _cpuid_uarchrev(uint_t, uint_t, uint_t, uint_t);
2005 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
2006 
2007 /*
2008  * Apply up various platform-dependent restrictions where the
2009  * underlying platform restrictions mean the CPU can be marked
2010  * as less capable than its cpuid instruction would imply.
2011  */
2012 #if defined(__xpv)
2013 static void
2014 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
2015 {
2016 	switch (eax) {
2017 	case 1: {
2018 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
2019 		    0 : CPUID_INTC_EDX_MCA;
2020 		cp->cp_edx &=
2021 		    ~(mcamask |
2022 		    CPUID_INTC_EDX_PSE |
2023 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
2024 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
2025 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
2026 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
2027 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
2028 		break;
2029 	}
2030 
2031 	case 0x80000001:
2032 		cp->cp_edx &=
2033 		    ~(CPUID_AMD_EDX_PSE |
2034 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
2035 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
2036 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
2037 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
2038 		    CPUID_AMD_EDX_TSCP);
2039 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
2040 		break;
2041 	default:
2042 		break;
2043 	}
2044 
2045 	switch (vendor) {
2046 	case X86_VENDOR_Intel:
2047 		switch (eax) {
2048 		case 4:
2049 			/*
2050 			 * Zero out the (ncores-per-chip - 1) field
2051 			 */
2052 			cp->cp_eax &= 0x03fffffff;
2053 			break;
2054 		default:
2055 			break;
2056 		}
2057 		break;
2058 	case X86_VENDOR_AMD:
2059 	case X86_VENDOR_HYGON:
2060 		switch (eax) {
2061 
2062 		case 0x80000001:
2063 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
2064 			break;
2065 
2066 		case CPUID_LEAF_EXT_8:
2067 			/*
2068 			 * Zero out the (ncores-per-chip - 1) field
2069 			 */
2070 			cp->cp_ecx &= 0xffffff00;
2071 			break;
2072 		default:
2073 			break;
2074 		}
2075 		break;
2076 	default:
2077 		break;
2078 	}
2079 }
2080 #else
2081 #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
2082 #endif
2083 
2084 /*
2085  *  Some undocumented ways of patching the results of the cpuid
2086  *  instruction to permit running Solaris 10 on future cpus that
2087  *  we don't currently support.  Could be set to non-zero values
2088  *  via settings in eeprom.
2089  */
2090 
2091 uint32_t cpuid_feature_ecx_include;
2092 uint32_t cpuid_feature_ecx_exclude;
2093 uint32_t cpuid_feature_edx_include;
2094 uint32_t cpuid_feature_edx_exclude;
2095 
2096 /*
2097  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
2098  */
2099 void
2100 cpuid_alloc_space(cpu_t *cpu)
2101 {
2102 	/*
2103 	 * By convention, cpu0 is the boot cpu, which is set up
2104 	 * before memory allocation is available.  All other cpus get
2105 	 * their cpuid_info struct allocated here.
2106 	 */
2107 	ASSERT(cpu->cpu_id != 0);
2108 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
2109 	cpu->cpu_m.mcpu_cpi =
2110 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
2111 }
2112 
2113 void
2114 cpuid_free_space(cpu_t *cpu)
2115 {
2116 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2117 	int i;
2118 
2119 	ASSERT(cpi != NULL);
2120 	ASSERT(cpi != &cpuid_info0);
2121 
2122 	/*
2123 	 * Free up any cache leaf related dynamic storage. The first entry was
2124 	 * cached from the standard cpuid storage, so we should not free it.
2125 	 */
2126 	for (i = 1; i < cpi->cpi_cache_leaf_size; i++)
2127 		kmem_free(cpi->cpi_cache_leaves[i], sizeof (struct cpuid_regs));
2128 	if (cpi->cpi_cache_leaf_size > 0)
2129 		kmem_free(cpi->cpi_cache_leaves,
2130 		    cpi->cpi_cache_leaf_size * sizeof (struct cpuid_regs *));
2131 
2132 	kmem_free(cpi, sizeof (*cpi));
2133 	cpu->cpu_m.mcpu_cpi = NULL;
2134 }
2135 
2136 #if !defined(__xpv)
2137 /*
2138  * Determine the type of the underlying platform. This is used to customize
2139  * initialization of various subsystems (e.g. TSC). determine_platform() must
2140  * only ever be called once to prevent two processors from seeing different
2141  * values of platform_type. Must be called before cpuid_pass_ident(), the
2142  * earliest consumer to execute; the identification pass will call
2143  * synth_amd_info() to compute the chiprev, which in turn calls get_hwenv().
2144  */
2145 void
2146 determine_platform(void)
2147 {
2148 	struct cpuid_regs cp;
2149 	uint32_t base;
2150 	uint32_t regs[4];
2151 	char *hvstr = (char *)regs;
2152 
2153 	ASSERT(platform_type == -1);
2154 
2155 	platform_type = HW_NATIVE;
2156 
2157 	if (!enable_platform_detection)
2158 		return;
2159 
2160 	/*
2161 	 * If Hypervisor CPUID bit is set, try to determine hypervisor
2162 	 * vendor signature, and set platform type accordingly.
2163 	 *
2164 	 * References:
2165 	 * http://lkml.org/lkml/2008/10/1/246
2166 	 * http://kb.vmware.com/kb/1009458
2167 	 */
2168 	cp.cp_eax = 0x1;
2169 	(void) __cpuid_insn(&cp);
2170 	if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) {
2171 		cp.cp_eax = 0x40000000;
2172 		(void) __cpuid_insn(&cp);
2173 		regs[0] = cp.cp_ebx;
2174 		regs[1] = cp.cp_ecx;
2175 		regs[2] = cp.cp_edx;
2176 		regs[3] = 0;
2177 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) {
2178 			platform_type = HW_XEN_HVM;
2179 			return;
2180 		}
2181 		if (strcmp(hvstr, HVSIG_VMWARE) == 0) {
2182 			platform_type = HW_VMWARE;
2183 			return;
2184 		}
2185 		if (strcmp(hvstr, HVSIG_KVM) == 0) {
2186 			platform_type = HW_KVM;
2187 			return;
2188 		}
2189 		if (strcmp(hvstr, HVSIG_BHYVE) == 0) {
2190 			platform_type = HW_BHYVE;
2191 			return;
2192 		}
2193 		if (strcmp(hvstr, HVSIG_MICROSOFT) == 0)
2194 			platform_type = HW_MICROSOFT;
2195 	} else {
2196 		/*
2197 		 * Check older VMware hardware versions. VMware hypervisor is
2198 		 * detected by performing an IN operation to VMware hypervisor
2199 		 * port and checking that value returned in %ebx is VMware
2200 		 * hypervisor magic value.
2201 		 *
2202 		 * References: http://kb.vmware.com/kb/1009458
2203 		 */
2204 		vmware_port(VMWARE_HVCMD_GETVERSION, regs);
2205 		if (regs[1] == VMWARE_HVMAGIC) {
2206 			platform_type = HW_VMWARE;
2207 			return;
2208 		}
2209 	}
2210 
2211 	/*
2212 	 * Check Xen hypervisor. In a fully virtualized domain,
2213 	 * Xen's pseudo-cpuid function returns a string representing the
2214 	 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
2215 	 * supported cpuid function. We need at least a (base + 2) leaf value
2216 	 * to do what we want to do. Try different base values, since the
2217 	 * hypervisor might use a different one depending on whether Hyper-V
2218 	 * emulation is switched on by default or not.
2219 	 */
2220 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
2221 		cp.cp_eax = base;
2222 		(void) __cpuid_insn(&cp);
2223 		regs[0] = cp.cp_ebx;
2224 		regs[1] = cp.cp_ecx;
2225 		regs[2] = cp.cp_edx;
2226 		regs[3] = 0;
2227 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 &&
2228 		    cp.cp_eax >= (base + 2)) {
2229 			platform_type &= ~HW_NATIVE;
2230 			platform_type |= HW_XEN_HVM;
2231 			return;
2232 		}
2233 	}
2234 }
2235 
2236 int
2237 get_hwenv(void)
2238 {
2239 	ASSERT(platform_type != -1);
2240 	return (platform_type);
2241 }
2242 
2243 int
2244 is_controldom(void)
2245 {
2246 	return (0);
2247 }
2248 
2249 #else
2250 
2251 int
2252 get_hwenv(void)
2253 {
2254 	return (HW_XEN_PV);
2255 }
2256 
2257 int
2258 is_controldom(void)
2259 {
2260 	return (DOMAIN_IS_INITDOMAIN(xen_info));
2261 }
2262 
2263 #endif	/* __xpv */
2264 
2265 /*
2266  * Gather the extended topology information. This should be the same for both
2267  * AMD leaf 8X26 and Intel leaf 0x1F (though the data interpretation varies).
2268  */
2269 static void
2270 cpuid_gather_ext_topo_leaf(struct cpuid_info *cpi, uint32_t leaf)
2271 {
2272 	uint_t i;
2273 
2274 	for (i = 0; i < ARRAY_SIZE(cpi->cpi_topo); i++) {
2275 		struct cpuid_regs *regs = &cpi->cpi_topo[i];
2276 
2277 		bzero(regs, sizeof (struct cpuid_regs));
2278 		regs->cp_eax = leaf;
2279 		regs->cp_ecx = i;
2280 
2281 		(void) __cpuid_insn(regs);
2282 		if (CPUID_AMD_8X26_ECX_TYPE(regs->cp_ecx) ==
2283 		    CPUID_AMD_8X26_TYPE_DONE) {
2284 			break;
2285 		}
2286 	}
2287 
2288 	cpi->cpi_topo_nleaves = i;
2289 }
2290 
2291 /*
2292  * Make sure that we have gathered all of the CPUID leaves that we might need to
2293  * determine topology. We assume that the standard leaf 1 has already been done
2294  * and that xmaxeax has already been calculated.
2295  */
2296 static void
2297 cpuid_gather_amd_topology_leaves(cpu_t *cpu)
2298 {
2299 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2300 
2301 	if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
2302 		struct cpuid_regs *cp;
2303 
2304 		cp = &cpi->cpi_extd[8];
2305 		cp->cp_eax = CPUID_LEAF_EXT_8;
2306 		(void) __cpuid_insn(cp);
2307 		platform_cpuid_mangle(cpi->cpi_vendor, CPUID_LEAF_EXT_8, cp);
2308 	}
2309 
2310 	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
2311 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1e) {
2312 		struct cpuid_regs *cp;
2313 
2314 		cp = &cpi->cpi_extd[0x1e];
2315 		cp->cp_eax = CPUID_LEAF_EXT_1e;
2316 		(void) __cpuid_insn(cp);
2317 	}
2318 
2319 	if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_26) {
2320 		cpuid_gather_ext_topo_leaf(cpi, CPUID_LEAF_EXT_26);
2321 	}
2322 }
2323 
2324 /*
2325  * Get the APIC ID for this processor. If Leaf B is present and valid, we prefer
2326  * it to everything else. If not, and we're on an AMD system where 8000001e is
2327  * valid, then we use that. Othewrise, we fall back to the default value for the
2328  * APIC ID in leaf 1.
2329  */
2330 static uint32_t
2331 cpuid_gather_apicid(struct cpuid_info *cpi)
2332 {
2333 	/*
2334 	 * Leaf B changes based on the arguments to it. Because we don't cache
2335 	 * it, we need to gather it again.
2336 	 */
2337 	if (cpi->cpi_maxeax >= 0xB) {
2338 		struct cpuid_regs regs;
2339 		struct cpuid_regs *cp;
2340 
2341 		cp = &regs;
2342 		cp->cp_eax = 0xB;
2343 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
2344 		(void) __cpuid_insn(cp);
2345 
2346 		if (cp->cp_ebx != 0) {
2347 			return (cp->cp_edx);
2348 		}
2349 	}
2350 
2351 	if ((cpi->cpi_vendor == X86_VENDOR_AMD ||
2352 	    cpi->cpi_vendor == X86_VENDOR_HYGON) &&
2353 	    is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
2354 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1e) {
2355 		return (cpi->cpi_extd[0x1e].cp_eax);
2356 	}
2357 
2358 	return (CPI_APIC_ID(cpi));
2359 }
2360 
2361 /*
2362  * For AMD processors, attempt to calculate the number of chips and cores that
2363  * exist. The way that we do this varies based on the generation, because the
2364  * generations themselves have changed dramatically.
2365  *
2366  * If cpuid leaf 0x80000008 exists, that generally tells us the number of cores.
2367  * However, with the advent of family 17h (Zen) it actually tells us the number
2368  * of threads, so we need to look at leaf 0x8000001e if available to determine
2369  * its value. Otherwise, for all prior families, the number of enabled cores is
2370  * the same as threads.
2371  *
2372  * If we do not have leaf 0x80000008, then we assume that this processor does
2373  * not have anything. AMD's older CPUID specification says there's no reason to
2374  * fall back to leaf 1.
2375  *
2376  * In some virtualization cases we will not have leaf 8000001e or it will be
2377  * zero. When that happens we assume the number of threads is one.
2378  */
2379 static void
2380 cpuid_amd_ncores(struct cpuid_info *cpi, uint_t *ncpus, uint_t *ncores)
2381 {
2382 	uint_t nthreads, nthread_per_core;
2383 
2384 	nthreads = nthread_per_core = 1;
2385 
2386 	if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
2387 		nthreads = BITX(cpi->cpi_extd[8].cp_ecx, 7, 0) + 1;
2388 	} else if ((cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_HTT) != 0) {
2389 		nthreads = CPI_CPU_COUNT(cpi);
2390 	}
2391 
2392 	/*
2393 	 * For us to have threads, and know about it, we have to be at least at
2394 	 * family 17h and have the cpuid bit that says we have extended
2395 	 * topology.
2396 	 */
2397 	if (cpi->cpi_family >= 0x17 &&
2398 	    is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
2399 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1e) {
2400 		nthread_per_core = BITX(cpi->cpi_extd[0x1e].cp_ebx, 15, 8) + 1;
2401 	}
2402 
2403 	*ncpus = nthreads;
2404 	*ncores = nthreads / nthread_per_core;
2405 }
2406 
2407 /*
2408  * Seed the initial values for the cores and threads for an Intel based
2409  * processor. These values will be overwritten if we detect that the processor
2410  * supports CPUID leaf 0xb.
2411  */
2412 static void
2413 cpuid_intel_ncores(struct cpuid_info *cpi, uint_t *ncpus, uint_t *ncores)
2414 {
2415 	/*
2416 	 * Only seed the number of physical cores from the first level leaf 4
2417 	 * information. The number of threads there indicate how many share the
2418 	 * L1 cache, which may or may not have anything to do with the number of
2419 	 * logical CPUs per core.
2420 	 */
2421 	if (cpi->cpi_maxeax >= 4) {
2422 		*ncores = BITX(cpi->cpi_std[4].cp_eax, 31, 26) + 1;
2423 	} else {
2424 		*ncores = 1;
2425 	}
2426 
2427 	if ((cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_HTT) != 0) {
2428 		*ncpus = CPI_CPU_COUNT(cpi);
2429 	} else {
2430 		*ncpus = *ncores;
2431 	}
2432 }
2433 
2434 static boolean_t
2435 cpuid_leafB_getids(cpu_t *cpu)
2436 {
2437 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2438 	struct cpuid_regs regs;
2439 	struct cpuid_regs *cp;
2440 
2441 	if (cpi->cpi_maxeax < 0xB)
2442 		return (B_FALSE);
2443 
2444 	cp = &regs;
2445 	cp->cp_eax = 0xB;
2446 	cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
2447 
2448 	(void) __cpuid_insn(cp);
2449 
2450 	/*
2451 	 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
2452 	 * indicates that the extended topology enumeration leaf is
2453 	 * available.
2454 	 */
2455 	if (cp->cp_ebx != 0) {
2456 		uint32_t x2apic_id = 0;
2457 		uint_t coreid_shift = 0;
2458 		uint_t ncpu_per_core = 1;
2459 		uint_t chipid_shift = 0;
2460 		uint_t ncpu_per_chip = 1;
2461 		uint_t i;
2462 		uint_t level;
2463 
2464 		for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
2465 			cp->cp_eax = 0xB;
2466 			cp->cp_ecx = i;
2467 
2468 			(void) __cpuid_insn(cp);
2469 			level = CPI_CPU_LEVEL_TYPE(cp);
2470 
2471 			if (level == 1) {
2472 				x2apic_id = cp->cp_edx;
2473 				coreid_shift = BITX(cp->cp_eax, 4, 0);
2474 				ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
2475 			} else if (level == 2) {
2476 				x2apic_id = cp->cp_edx;
2477 				chipid_shift = BITX(cp->cp_eax, 4, 0);
2478 				ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
2479 			}
2480 		}
2481 
2482 		/*
2483 		 * cpi_apicid is taken care of in cpuid_gather_apicid.
2484 		 */
2485 		cpi->cpi_ncpu_per_chip = ncpu_per_chip;
2486 		cpi->cpi_ncore_per_chip = ncpu_per_chip /
2487 		    ncpu_per_core;
2488 		cpi->cpi_chipid = x2apic_id >> chipid_shift;
2489 		cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
2490 		cpi->cpi_coreid = x2apic_id >> coreid_shift;
2491 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
2492 		cpi->cpi_procnodeid = cpi->cpi_chipid;
2493 		cpi->cpi_compunitid = cpi->cpi_coreid;
2494 
2495 		if (coreid_shift > 0 && chipid_shift > coreid_shift) {
2496 			cpi->cpi_nthread_bits = coreid_shift;
2497 			cpi->cpi_ncore_bits = chipid_shift - coreid_shift;
2498 		}
2499 
2500 		return (B_TRUE);
2501 	} else {
2502 		return (B_FALSE);
2503 	}
2504 }
2505 
2506 static void
2507 cpuid_intel_getids(cpu_t *cpu, void *feature)
2508 {
2509 	uint_t i;
2510 	uint_t chipid_shift = 0;
2511 	uint_t coreid_shift = 0;
2512 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2513 
2514 	/*
2515 	 * There are no compute units or processor nodes currently on Intel.
2516 	 * Always set these to one.
2517 	 */
2518 	cpi->cpi_procnodes_per_pkg = 1;
2519 	cpi->cpi_cores_per_compunit = 1;
2520 
2521 	/*
2522 	 * If cpuid Leaf B is present, use that to try and get this information.
2523 	 * It will be the most accurate for Intel CPUs.
2524 	 */
2525 	if (cpuid_leafB_getids(cpu))
2526 		return;
2527 
2528 	/*
2529 	 * In this case, we have the leaf 1 and leaf 4 values for ncpu_per_chip
2530 	 * and ncore_per_chip. These represent the largest power of two values
2531 	 * that we need to cover all of the IDs in the system. Therefore, we use
2532 	 * those values to seed the number of bits needed to cover information
2533 	 * in the case when leaf B is not available. These values will probably
2534 	 * be larger than required, but that's OK.
2535 	 */
2536 	cpi->cpi_nthread_bits = ddi_fls(cpi->cpi_ncpu_per_chip);
2537 	cpi->cpi_ncore_bits = ddi_fls(cpi->cpi_ncore_per_chip);
2538 
2539 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
2540 		chipid_shift++;
2541 
2542 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
2543 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
2544 
2545 	if (is_x86_feature(feature, X86FSET_CMP)) {
2546 		/*
2547 		 * Multi-core (and possibly multi-threaded)
2548 		 * processors.
2549 		 */
2550 		uint_t ncpu_per_core = 0;
2551 
2552 		if (cpi->cpi_ncore_per_chip == 1)
2553 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
2554 		else if (cpi->cpi_ncore_per_chip > 1)
2555 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
2556 			    cpi->cpi_ncore_per_chip;
2557 		/*
2558 		 * 8bit APIC IDs on dual core Pentiums
2559 		 * look like this:
2560 		 *
2561 		 * +-----------------------+------+------+
2562 		 * | Physical Package ID   |  MC  |  HT  |
2563 		 * +-----------------------+------+------+
2564 		 * <------- chipid -------->
2565 		 * <------- coreid --------------->
2566 		 *			   <--- clogid -->
2567 		 *			   <------>
2568 		 *			   pkgcoreid
2569 		 *
2570 		 * Where the number of bits necessary to
2571 		 * represent MC and HT fields together equals
2572 		 * to the minimum number of bits necessary to
2573 		 * store the value of cpi->cpi_ncpu_per_chip.
2574 		 * Of those bits, the MC part uses the number
2575 		 * of bits necessary to store the value of
2576 		 * cpi->cpi_ncore_per_chip.
2577 		 */
2578 		for (i = 1; i < ncpu_per_core; i <<= 1)
2579 			coreid_shift++;
2580 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
2581 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
2582 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
2583 		/*
2584 		 * Single-core multi-threaded processors.
2585 		 */
2586 		cpi->cpi_coreid = cpi->cpi_chipid;
2587 		cpi->cpi_pkgcoreid = 0;
2588 	} else {
2589 		/*
2590 		 * Single-core single-thread processors.
2591 		 */
2592 		cpi->cpi_coreid = cpu->cpu_id;
2593 		cpi->cpi_pkgcoreid = 0;
2594 	}
2595 	cpi->cpi_procnodeid = cpi->cpi_chipid;
2596 	cpi->cpi_compunitid = cpi->cpi_coreid;
2597 }
2598 
2599 /*
2600  * Historically, AMD has had CMP chips with only a single thread per core.
2601  * However, starting in family 17h (Zen), this has changed and they now have
2602  * multiple threads. Our internal core id needs to be a unique value.
2603  *
2604  * To determine the core id of an AMD system, if we're from a family before 17h,
2605  * then we just use the cpu id, as that gives us a good value that will be
2606  * unique for each core. If instead, we're on family 17h or later, then we need
2607  * to do something more complicated. CPUID leaf 0x8000001e can tell us
2608  * how many threads are in the system. Based on that, we'll shift the APIC ID.
2609  * We can't use the normal core id in that leaf as it's only unique within the
2610  * socket, which is perfect for cpi_pkgcoreid, but not us.
2611  */
2612 static id_t
2613 cpuid_amd_get_coreid(cpu_t *cpu)
2614 {
2615 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2616 
2617 	if (cpi->cpi_family >= 0x17 &&
2618 	    is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
2619 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1e) {
2620 		uint_t nthreads = BITX(cpi->cpi_extd[0x1e].cp_ebx, 15, 8) + 1;
2621 		if (nthreads > 1) {
2622 			VERIFY3U(nthreads, ==, 2);
2623 			return (cpi->cpi_apicid >> 1);
2624 		}
2625 	}
2626 
2627 	return (cpu->cpu_id);
2628 }
2629 
2630 /*
2631  * IDs on AMD is a more challenging task. This is notable because of the
2632  * following two facts:
2633  *
2634  *  1. Before family 0x17 (Zen), there was no support for SMT and there was
2635  *     also no way to get an actual unique core id from the system. As such, we
2636  *     synthesize this case by using cpu->cpu_id.  This scheme does not,
2637  *     however, guarantee that sibling cores of a chip will have sequential
2638  *     coreids starting at a multiple of the number of cores per chip - that is
2639  *     usually the case, but if the APIC IDs have been set up in a different
2640  *     order then we need to perform a few more gymnastics for the pkgcoreid.
2641  *
2642  *  2. In families 0x15 and 16x (Bulldozer and co.) the cores came in groups
2643  *     called compute units. These compute units share the L1I cache, L2 cache,
2644  *     and the FPU. To deal with this, a new topology leaf was added in
2645  *     0x8000001e. However, parts of this leaf have different meanings
2646  *     once we get to family 0x17.
2647  */
2648 
2649 static void
2650 cpuid_amd_getids(cpu_t *cpu, uchar_t *features)
2651 {
2652 	int i, first_half, coreidsz;
2653 	uint32_t nb_caps_reg;
2654 	uint_t node2_1;
2655 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2656 	struct cpuid_regs *cp;
2657 
2658 	/*
2659 	 * Calculate the core id (this comes from hardware in family 0x17 if it
2660 	 * hasn't been stripped by virtualization). We always set the compute
2661 	 * unit id to the same value. Also, initialize the default number of
2662 	 * cores per compute unit and nodes per package. This will be
2663 	 * overwritten when we know information about a particular family.
2664 	 */
2665 	cpi->cpi_coreid = cpuid_amd_get_coreid(cpu);
2666 	cpi->cpi_compunitid = cpi->cpi_coreid;
2667 	cpi->cpi_cores_per_compunit = 1;
2668 	cpi->cpi_procnodes_per_pkg = 1;
2669 
2670 	/*
2671 	 * To construct the logical ID, we need to determine how many APIC IDs
2672 	 * are dedicated to the cores and threads. This is provided for us in
2673 	 * 0x80000008. However, if it's not present (say due to virtualization),
2674 	 * then we assume it's one. This should be present on all 64-bit AMD
2675 	 * processors.  It was added in family 0xf (Hammer).
2676 	 */
2677 	if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
2678 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
2679 
2680 		/*
2681 		 * In AMD parlance chip is really a node while illumos
2682 		 * uses chip as equivalent to socket/package.
2683 		 */
2684 		if (coreidsz == 0) {
2685 			/* Use legacy method */
2686 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
2687 				coreidsz++;
2688 			if (coreidsz == 0)
2689 				coreidsz = 1;
2690 		}
2691 	} else {
2692 		/* Assume single-core part */
2693 		coreidsz = 1;
2694 	}
2695 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << coreidsz) - 1);
2696 
2697 	/*
2698 	 * The package core ID varies depending on the family. While it may be
2699 	 * tempting to use the CPUID_LEAF_EXT_1e %ebx core id, unfortunately,
2700 	 * this value is the core id in the given node. For non-virtualized
2701 	 * family 17h, we need to take the logical core id and shift off the
2702 	 * threads like we do when getting the core id.  Otherwise, we can use
2703 	 * the clogid as is. When family 17h is virtualized, the clogid should
2704 	 * be sufficient as if we don't have valid data in the leaf, then we
2705 	 * won't think we have SMT, in which case the cpi_clogid should be
2706 	 * sufficient.
2707 	 */
2708 	if (cpi->cpi_family >= 0x17 &&
2709 	    is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
2710 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1e &&
2711 	    cpi->cpi_extd[0x1e].cp_ebx != 0) {
2712 		uint_t nthreads = BITX(cpi->cpi_extd[0x1e].cp_ebx, 15, 8) + 1;
2713 		if (nthreads > 1) {
2714 			VERIFY3U(nthreads, ==, 2);
2715 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> 1;
2716 		} else {
2717 			cpi->cpi_pkgcoreid = cpi->cpi_clogid;
2718 		}
2719 	} else {
2720 		cpi->cpi_pkgcoreid = cpi->cpi_clogid;
2721 	}
2722 
2723 	/*
2724 	 * Obtain the node ID and compute unit IDs. If we're on family 0x15
2725 	 * (bulldozer) or newer, then we can derive all of this from leaf
2726 	 * CPUID_LEAF_EXT_1e. Otherwise, the method varies by family.
2727 	 */
2728 	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
2729 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1e) {
2730 		cp = &cpi->cpi_extd[0x1e];
2731 
2732 		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
2733 		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
2734 
2735 		/*
2736 		 * For Bulldozer-era CPUs, recalculate the compute unit
2737 		 * information.
2738 		 */
2739 		if (cpi->cpi_family >= 0x15 && cpi->cpi_family < 0x17) {
2740 			cpi->cpi_cores_per_compunit =
2741 			    BITX(cp->cp_ebx, 15, 8) + 1;
2742 			cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0) +
2743 			    (cpi->cpi_ncore_per_chip /
2744 			    cpi->cpi_cores_per_compunit) *
2745 			    (cpi->cpi_procnodeid /
2746 			    cpi->cpi_procnodes_per_pkg);
2747 		}
2748 	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
2749 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
2750 	} else if (cpi->cpi_family == 0x10) {
2751 		/*
2752 		 * See if we are a multi-node processor.
2753 		 * All processors in the system have the same number of nodes
2754 		 */
2755 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
2756 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
2757 			/* Single-node */
2758 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
2759 			    coreidsz);
2760 		} else {
2761 
2762 			/*
2763 			 * Multi-node revision D (2 nodes per package
2764 			 * are supported)
2765 			 */
2766 			cpi->cpi_procnodes_per_pkg = 2;
2767 
2768 			first_half = (cpi->cpi_pkgcoreid <=
2769 			    (cpi->cpi_ncore_per_chip/2 - 1));
2770 
2771 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
2772 				/* We are BSP */
2773 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
2774 			} else {
2775 
2776 				/* We are AP */
2777 				/* NodeId[2:1] bits to use for reading F3xe8 */
2778 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
2779 
2780 				nb_caps_reg =
2781 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
2782 
2783 				/*
2784 				 * Check IntNodeNum bit (31:30, but bit 31 is
2785 				 * always 0 on dual-node processors)
2786 				 */
2787 				if (BITX(nb_caps_reg, 30, 30) == 0)
2788 					cpi->cpi_procnodeid = node2_1 +
2789 					    !first_half;
2790 				else
2791 					cpi->cpi_procnodeid = node2_1 +
2792 					    first_half;
2793 			}
2794 		}
2795 	} else {
2796 		cpi->cpi_procnodeid = 0;
2797 	}
2798 
2799 	cpi->cpi_chipid =
2800 	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
2801 
2802 	cpi->cpi_ncore_bits = coreidsz;
2803 	cpi->cpi_nthread_bits = ddi_fls(cpi->cpi_ncpu_per_chip /
2804 	    cpi->cpi_ncore_per_chip);
2805 }
2806 
2807 static void
2808 spec_uarch_flush_noop(void)
2809 {
2810 }
2811 
2812 /*
2813  * When microcode is present that mitigates MDS, this wrmsr will also flush the
2814  * MDS-related micro-architectural state that would normally happen by calling
2815  * x86_md_clear().
2816  */
2817 static void
2818 spec_uarch_flush_msr(void)
2819 {
2820 	wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D);
2821 }
2822 
2823 /*
2824  * This function points to a function that will flush certain
2825  * micro-architectural state on the processor. This flush is used to mitigate
2826  * two different classes of Intel CPU vulnerabilities: L1TF and MDS. This
2827  * function can point to one of three functions:
2828  *
2829  * - A noop which is done because we either are vulnerable, but do not have
2830  *   microcode available to help deal with a fix, or because we aren't
2831  *   vulnerable.
2832  *
2833  * - spec_uarch_flush_msr which will issue an L1D flush and if microcode to
2834  *   mitigate MDS is present, also perform the equivalent of the MDS flush;
2835  *   however, it only flushes the MDS related micro-architectural state on the
2836  *   current hyperthread, it does not do anything for the twin.
2837  *
2838  * - x86_md_clear which will flush the MDS related state. This is done when we
2839  *   have a processor that is vulnerable to MDS, but is not vulnerable to L1TF
2840  *   (RDCL_NO is set).
2841  */
2842 void (*spec_uarch_flush)(void) = spec_uarch_flush_noop;
2843 
2844 static void
2845 cpuid_update_md_clear(cpu_t *cpu, uchar_t *featureset)
2846 {
2847 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2848 
2849 	/*
2850 	 * While RDCL_NO indicates that one of the MDS vulnerabilities (MSBDS)
2851 	 * has been fixed in hardware, it doesn't cover everything related to
2852 	 * MDS. Therefore we can only rely on MDS_NO to determine that we don't
2853 	 * need to mitigate this.
2854 	 */
2855 	if (cpi->cpi_vendor != X86_VENDOR_Intel ||
2856 	    is_x86_feature(featureset, X86FSET_MDS_NO)) {
2857 		return;
2858 	}
2859 
2860 	if (is_x86_feature(featureset, X86FSET_MD_CLEAR)) {
2861 		const uint8_t nop = NOP_INSTR;
2862 		uint8_t *md = (uint8_t *)x86_md_clear;
2863 
2864 		*md = nop;
2865 	}
2866 
2867 	membar_producer();
2868 }
2869 
2870 static void
2871 cpuid_update_l1d_flush(cpu_t *cpu, uchar_t *featureset)
2872 {
2873 	boolean_t need_l1d, need_mds;
2874 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2875 
2876 	/*
2877 	 * If we're not on Intel or we've mitigated both RDCL and MDS in
2878 	 * hardware, then there's nothing left for us to do for enabling the
2879 	 * flush. We can also go ahead and say that SMT exclusion is
2880 	 * unnecessary.
2881 	 */
2882 	if (cpi->cpi_vendor != X86_VENDOR_Intel ||
2883 	    (is_x86_feature(featureset, X86FSET_RDCL_NO) &&
2884 	    is_x86_feature(featureset, X86FSET_MDS_NO))) {
2885 		extern int smt_exclusion;
2886 		smt_exclusion = 0;
2887 		spec_uarch_flush = spec_uarch_flush_noop;
2888 		membar_producer();
2889 		return;
2890 	}
2891 
2892 	/*
2893 	 * The locations where we need to perform an L1D flush are required both
2894 	 * for mitigating L1TF and MDS. When verw support is present in
2895 	 * microcode, then the L1D flush will take care of doing that as well.
2896 	 * However, if we have a system where RDCL_NO is present, but we don't
2897 	 * have MDS_NO, then we need to do a verw (x86_md_clear) and not a full
2898 	 * L1D flush.
2899 	 */
2900 	if (!is_x86_feature(featureset, X86FSET_RDCL_NO) &&
2901 	    is_x86_feature(featureset, X86FSET_FLUSH_CMD) &&
2902 	    !is_x86_feature(featureset, X86FSET_L1D_VM_NO)) {
2903 		need_l1d = B_TRUE;
2904 	} else {
2905 		need_l1d = B_FALSE;
2906 	}
2907 
2908 	if (!is_x86_feature(featureset, X86FSET_MDS_NO) &&
2909 	    is_x86_feature(featureset, X86FSET_MD_CLEAR)) {
2910 		need_mds = B_TRUE;
2911 	} else {
2912 		need_mds = B_FALSE;
2913 	}
2914 
2915 	if (need_l1d) {
2916 		spec_uarch_flush = spec_uarch_flush_msr;
2917 	} else if (need_mds) {
2918 		spec_uarch_flush = x86_md_clear;
2919 	} else {
2920 		/*
2921 		 * We have no hardware mitigations available to us.
2922 		 */
2923 		spec_uarch_flush = spec_uarch_flush_noop;
2924 	}
2925 	membar_producer();
2926 }
2927 
2928 /*
2929  * We default to enabling RSB mitigations.
2930  *
2931  * NOTE: We used to skip RSB mitigations with eIBRS, but developments around
2932  * post-barrier RSB guessing suggests we should enable RSB mitigations always
2933  * unless specifically instructed not to.
2934  *
2935  * AMD indicates that when Automatic IBRS is enabled we do not need to implement
2936  * return stack buffer clearing for VMEXIT as it takes care of it. The manual
2937  * also states that as long as SMEP and we maintain at least one page between
2938  * the kernel and user space (we have much more of a red zone), then we do not
2939  * need to clear the RSB. We constrain this to only when Automatic IRBS is
2940  * present.
2941  */
2942 static void
2943 cpuid_patch_rsb(x86_spectrev2_mitigation_t mit)
2944 {
2945 	const uint8_t ret = RET_INSTR;
2946 	uint8_t *stuff = (uint8_t *)x86_rsb_stuff;
2947 
2948 	switch (mit) {
2949 	case X86_SPECTREV2_AUTO_IBRS:
2950 	case X86_SPECTREV2_DISABLED:
2951 		*stuff = ret;
2952 		break;
2953 	default:
2954 		break;
2955 	}
2956 }
2957 
2958 static void
2959 cpuid_patch_retpolines(x86_spectrev2_mitigation_t mit)
2960 {
2961 	const char *thunks[] = { "_rax", "_rbx", "_rcx", "_rdx", "_rdi",
2962 	    "_rsi", "_rbp", "_r8", "_r9", "_r10", "_r11", "_r12", "_r13",
2963 	    "_r14", "_r15" };
2964 	const uint_t nthunks = ARRAY_SIZE(thunks);
2965 	const char *type;
2966 	uint_t i;
2967 
2968 	if (mit == x86_spectrev2_mitigation)
2969 		return;
2970 
2971 	switch (mit) {
2972 	case X86_SPECTREV2_RETPOLINE:
2973 		type = "gen";
2974 		break;
2975 	case X86_SPECTREV2_AUTO_IBRS:
2976 	case X86_SPECTREV2_ENHANCED_IBRS:
2977 	case X86_SPECTREV2_DISABLED:
2978 		type = "jmp";
2979 		break;
2980 	default:
2981 		panic("asked to update retpoline state with unknown state!");
2982 	}
2983 
2984 	for (i = 0; i < nthunks; i++) {
2985 		uintptr_t source, dest;
2986 		int ssize, dsize;
2987 		char sourcebuf[64], destbuf[64];
2988 
2989 		(void) snprintf(destbuf, sizeof (destbuf),
2990 		    "__x86_indirect_thunk%s", thunks[i]);
2991 		(void) snprintf(sourcebuf, sizeof (sourcebuf),
2992 		    "__x86_indirect_thunk_%s%s", type, thunks[i]);
2993 
2994 		source = kobj_getelfsym(sourcebuf, NULL, &ssize);
2995 		dest = kobj_getelfsym(destbuf, NULL, &dsize);
2996 		VERIFY3U(source, !=, 0);
2997 		VERIFY3U(dest, !=, 0);
2998 		VERIFY3S(dsize, >=, ssize);
2999 		bcopy((void *)source, (void *)dest, ssize);
3000 	}
3001 }
3002 
3003 static void
3004 cpuid_enable_enhanced_ibrs(void)
3005 {
3006 	uint64_t val;
3007 
3008 	val = rdmsr(MSR_IA32_SPEC_CTRL);
3009 	val |= IA32_SPEC_CTRL_IBRS;
3010 	wrmsr(MSR_IA32_SPEC_CTRL, val);
3011 }
3012 
3013 static void
3014 cpuid_enable_auto_ibrs(void)
3015 {
3016 	uint64_t val;
3017 
3018 	val = rdmsr(MSR_AMD_EFER);
3019 	val |= AMD_EFER_AIBRSE;
3020 	wrmsr(MSR_AMD_EFER, val);
3021 }
3022 
3023 /*
3024  * Determine how we should mitigate TAA or if we need to. Regardless of TAA, if
3025  * we can disable TSX, we do so.
3026  *
3027  * This determination is done only on the boot CPU, potentially after loading
3028  * updated microcode.
3029  */
3030 static void
3031 cpuid_update_tsx(cpu_t *cpu, uchar_t *featureset)
3032 {
3033 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3034 
3035 	VERIFY(cpu->cpu_id == 0);
3036 
3037 	if (cpi->cpi_vendor != X86_VENDOR_Intel) {
3038 		x86_taa_mitigation = X86_TAA_HW_MITIGATED;
3039 		return;
3040 	}
3041 
3042 	if (x86_disable_taa) {
3043 		x86_taa_mitigation = X86_TAA_DISABLED;
3044 		return;
3045 	}
3046 
3047 	/*
3048 	 * If we do not have the ability to disable TSX, then our only
3049 	 * mitigation options are in hardware (TAA_NO), or by using our existing
3050 	 * MDS mitigation as described above.  The latter relies upon us having
3051 	 * configured MDS mitigations correctly! This includes disabling SMT if
3052 	 * we want to cross-CPU-thread protection.
3053 	 */
3054 	if (!is_x86_feature(featureset, X86FSET_TSX_CTRL)) {
3055 		/*
3056 		 * It's not clear whether any parts will enumerate TAA_NO
3057 		 * *without* TSX_CTRL, but let's mark it as such if we see this.
3058 		 */
3059 		if (is_x86_feature(featureset, X86FSET_TAA_NO)) {
3060 			x86_taa_mitigation = X86_TAA_HW_MITIGATED;
3061 			return;
3062 		}
3063 
3064 		if (is_x86_feature(featureset, X86FSET_MD_CLEAR) &&
3065 		    !is_x86_feature(featureset, X86FSET_MDS_NO)) {
3066 			x86_taa_mitigation = X86_TAA_MD_CLEAR;
3067 		} else {
3068 			x86_taa_mitigation = X86_TAA_NOTHING;
3069 		}
3070 		return;
3071 	}
3072 
3073 	/*
3074 	 * We have TSX_CTRL, but we can only fully disable TSX if we're early
3075 	 * enough in boot.
3076 	 *
3077 	 * Otherwise, we'll fall back to causing transactions to abort as our
3078 	 * mitigation. TSX-using code will always take the fallback path.
3079 	 */
3080 	if (cpi->cpi_pass < 4) {
3081 		x86_taa_mitigation = X86_TAA_TSX_DISABLE;
3082 	} else {
3083 		x86_taa_mitigation = X86_TAA_TSX_FORCE_ABORT;
3084 	}
3085 }
3086 
3087 /*
3088  * As mentioned, we should only touch the MSR when we've got a suitable
3089  * microcode loaded on this CPU.
3090  */
3091 static void
3092 cpuid_apply_tsx(x86_taa_mitigation_t taa, uchar_t *featureset)
3093 {
3094 	uint64_t val;
3095 
3096 	switch (taa) {
3097 	case X86_TAA_TSX_DISABLE:
3098 		if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
3099 			return;
3100 		val = rdmsr(MSR_IA32_TSX_CTRL);
3101 		val |= IA32_TSX_CTRL_CPUID_CLEAR | IA32_TSX_CTRL_RTM_DISABLE;
3102 		wrmsr(MSR_IA32_TSX_CTRL, val);
3103 		break;
3104 	case X86_TAA_TSX_FORCE_ABORT:
3105 		if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
3106 			return;
3107 		val = rdmsr(MSR_IA32_TSX_CTRL);
3108 		val |= IA32_TSX_CTRL_RTM_DISABLE;
3109 		wrmsr(MSR_IA32_TSX_CTRL, val);
3110 		break;
3111 	case X86_TAA_HW_MITIGATED:
3112 	case X86_TAA_MD_CLEAR:
3113 	case X86_TAA_DISABLED:
3114 	case X86_TAA_NOTHING:
3115 		break;
3116 	}
3117 }
3118 
3119 static void
3120 cpuid_scan_security(cpu_t *cpu, uchar_t *featureset)
3121 {
3122 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3123 	x86_spectrev2_mitigation_t v2mit;
3124 
3125 	if ((cpi->cpi_vendor == X86_VENDOR_AMD ||
3126 	    cpi->cpi_vendor == X86_VENDOR_HYGON) &&
3127 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
3128 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBPB)
3129 			add_x86_feature(featureset, X86FSET_IBPB);
3130 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS)
3131 			add_x86_feature(featureset, X86FSET_IBRS);
3132 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP)
3133 			add_x86_feature(featureset, X86FSET_STIBP);
3134 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP_ALL)
3135 			add_x86_feature(featureset, X86FSET_STIBP_ALL);
3136 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSBD)
3137 			add_x86_feature(featureset, X86FSET_SSBD);
3138 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_VIRT_SSBD)
3139 			add_x86_feature(featureset, X86FSET_SSBD_VIRT);
3140 		if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSB_NO)
3141 			add_x86_feature(featureset, X86FSET_SSB_NO);
3142 
3143 		/*
3144 		 * Rather than Enhanced IBRS, AMD has a different feature that
3145 		 * is a bit in EFER that can be enabled and will basically do
3146 		 * the right thing while executing in the kernel.
3147 		 */
3148 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
3149 		    (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PREFER_IBRS) &&
3150 		    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_21 &&
3151 		    (cpi->cpi_extd[0x21].cp_eax & CPUID_AMD_8X21_EAX_AIBRS)) {
3152 			add_x86_feature(featureset, X86FSET_AUTO_IBRS);
3153 		}
3154 
3155 	} else if (cpi->cpi_vendor == X86_VENDOR_Intel &&
3156 	    cpi->cpi_maxeax >= 7) {
3157 		struct cpuid_regs *ecp;
3158 		ecp = &cpi->cpi_std[7];
3159 
3160 		if (ecp->cp_edx & CPUID_INTC_EDX_7_0_MD_CLEAR) {
3161 			add_x86_feature(featureset, X86FSET_MD_CLEAR);
3162 		}
3163 
3164 		if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SPEC_CTRL) {
3165 			add_x86_feature(featureset, X86FSET_IBRS);
3166 			add_x86_feature(featureset, X86FSET_IBPB);
3167 		}
3168 
3169 		if (ecp->cp_edx & CPUID_INTC_EDX_7_0_STIBP) {
3170 			add_x86_feature(featureset, X86FSET_STIBP);
3171 		}
3172 
3173 		/*
3174 		 * Don't read the arch caps MSR on xpv where we lack the
3175 		 * on_trap().
3176 		 */
3177 #ifndef __xpv
3178 		if (ecp->cp_edx & CPUID_INTC_EDX_7_0_ARCH_CAPS) {
3179 			on_trap_data_t otd;
3180 
3181 			/*
3182 			 * Be paranoid and assume we'll get a #GP.
3183 			 */
3184 			if (!on_trap(&otd, OT_DATA_ACCESS)) {
3185 				uint64_t reg;
3186 
3187 				reg = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3188 				if (reg & IA32_ARCH_CAP_RDCL_NO) {
3189 					add_x86_feature(featureset,
3190 					    X86FSET_RDCL_NO);
3191 				}
3192 				if (reg & IA32_ARCH_CAP_IBRS_ALL) {
3193 					add_x86_feature(featureset,
3194 					    X86FSET_IBRS_ALL);
3195 				}
3196 				if (reg & IA32_ARCH_CAP_RSBA) {
3197 					add_x86_feature(featureset,
3198 					    X86FSET_RSBA);
3199 				}
3200 				if (reg & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) {
3201 					add_x86_feature(featureset,
3202 					    X86FSET_L1D_VM_NO);
3203 				}
3204 				if (reg & IA32_ARCH_CAP_SSB_NO) {
3205 					add_x86_feature(featureset,
3206 					    X86FSET_SSB_NO);
3207 				}
3208 				if (reg & IA32_ARCH_CAP_MDS_NO) {
3209 					add_x86_feature(featureset,
3210 					    X86FSET_MDS_NO);
3211 				}
3212 				if (reg & IA32_ARCH_CAP_TSX_CTRL) {
3213 					add_x86_feature(featureset,
3214 					    X86FSET_TSX_CTRL);
3215 				}
3216 				if (reg & IA32_ARCH_CAP_TAA_NO) {
3217 					add_x86_feature(featureset,
3218 					    X86FSET_TAA_NO);
3219 				}
3220 			}
3221 			no_trap();
3222 		}
3223 #endif	/* !__xpv */
3224 
3225 		if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
3226 			add_x86_feature(featureset, X86FSET_SSBD);
3227 
3228 		if (ecp->cp_edx & CPUID_INTC_EDX_7_0_FLUSH_CMD)
3229 			add_x86_feature(featureset, X86FSET_FLUSH_CMD);
3230 	}
3231 
3232 	/*
3233 	 * Take care of certain mitigations on the non-boot CPU. The boot CPU
3234 	 * will have already run this function and determined what we need to
3235 	 * do. This gives us a hook for per-HW thread mitigations such as
3236 	 * enhanced IBRS, or disabling TSX.
3237 	 */
3238 	if (cpu->cpu_id != 0) {
3239 		switch (x86_spectrev2_mitigation) {
3240 		case X86_SPECTREV2_ENHANCED_IBRS:
3241 			cpuid_enable_enhanced_ibrs();
3242 			break;
3243 		case X86_SPECTREV2_AUTO_IBRS:
3244 			cpuid_enable_auto_ibrs();
3245 			break;
3246 		default:
3247 			break;
3248 		}
3249 
3250 		cpuid_apply_tsx(x86_taa_mitigation, featureset);
3251 		return;
3252 	}
3253 
3254 	/*
3255 	 * Go through and initialize various security mechanisms that we should
3256 	 * only do on a single CPU. This includes Spectre V2, L1TF, MDS, and
3257 	 * TAA.
3258 	 */
3259 
3260 	/*
3261 	 * By default we've come in with retpolines enabled. Check whether we
3262 	 * should disable them or enable enhanced or automatic IBRS. RSB
3263 	 * stuffing is enabled by default. Note, we do not allow the use of AMD
3264 	 * optimized retpolines as it was disclosed by AMD in March 2022 that
3265 	 * they were still vulnerable. Prior to that point, we used them.
3266 	 */
3267 	if (x86_disable_spectrev2 != 0) {
3268 		v2mit = X86_SPECTREV2_DISABLED;
3269 	} else if (is_x86_feature(featureset, X86FSET_AUTO_IBRS)) {
3270 		cpuid_enable_auto_ibrs();
3271 		v2mit = X86_SPECTREV2_AUTO_IBRS;
3272 	} else if (is_x86_feature(featureset, X86FSET_IBRS_ALL)) {
3273 		cpuid_enable_enhanced_ibrs();
3274 		v2mit = X86_SPECTREV2_ENHANCED_IBRS;
3275 	} else {
3276 		v2mit = X86_SPECTREV2_RETPOLINE;
3277 	}
3278 
3279 	cpuid_patch_retpolines(v2mit);
3280 	cpuid_patch_rsb(v2mit);
3281 	x86_spectrev2_mitigation = v2mit;
3282 	membar_producer();
3283 
3284 	/*
3285 	 * We need to determine what changes are required for mitigating L1TF
3286 	 * and MDS. If the CPU suffers from either of them, then SMT exclusion
3287 	 * is required.
3288 	 *
3289 	 * If any of these are present, then we need to flush u-arch state at
3290 	 * various points. For MDS, we need to do so whenever we change to a
3291 	 * lesser privilege level or we are halting the CPU. For L1TF we need to
3292 	 * flush the L1D cache at VM entry. When we have microcode that handles
3293 	 * MDS, the L1D flush also clears the other u-arch state that the
3294 	 * md_clear does.
3295 	 */
3296 
3297 	/*
3298 	 * Update whether or not we need to be taking explicit action against
3299 	 * MDS.
3300 	 */
3301 	cpuid_update_md_clear(cpu, featureset);
3302 
3303 	/*
3304 	 * Determine whether SMT exclusion is required and whether or not we
3305 	 * need to perform an l1d flush.
3306 	 */
3307 	cpuid_update_l1d_flush(cpu, featureset);
3308 
3309 	/*
3310 	 * Determine what our mitigation strategy should be for TAA and then
3311 	 * also apply TAA mitigations.
3312 	 */
3313 	cpuid_update_tsx(cpu, featureset);
3314 	cpuid_apply_tsx(x86_taa_mitigation, featureset);
3315 }
3316 
3317 /*
3318  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
3319  */
3320 void
3321 setup_xfem(void)
3322 {
3323 	uint64_t flags = XFEATURE_LEGACY_FP;
3324 
3325 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
3326 
3327 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
3328 		flags |= XFEATURE_SSE;
3329 
3330 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
3331 		flags |= XFEATURE_AVX;
3332 
3333 	if (is_x86_feature(x86_featureset, X86FSET_AVX512F))
3334 		flags |= XFEATURE_AVX512;
3335 
3336 	set_xcr(XFEATURE_ENABLED_MASK, flags);
3337 
3338 	xsave_bv_all = flags;
3339 }
3340 
3341 static void
3342 cpuid_basic_topology(cpu_t *cpu, uchar_t *featureset)
3343 {
3344 	struct cpuid_info *cpi;
3345 
3346 	cpi = cpu->cpu_m.mcpu_cpi;
3347 
3348 	if (cpi->cpi_vendor == X86_VENDOR_AMD ||
3349 	    cpi->cpi_vendor == X86_VENDOR_HYGON) {
3350 		cpuid_gather_amd_topology_leaves(cpu);
3351 	}
3352 
3353 	cpi->cpi_apicid = cpuid_gather_apicid(cpi);
3354 
3355 	/*
3356 	 * Before we can calculate the IDs that we should assign to this
3357 	 * processor, we need to understand how many cores and threads it has.
3358 	 */
3359 	switch (cpi->cpi_vendor) {
3360 	case X86_VENDOR_Intel:
3361 		cpuid_intel_ncores(cpi, &cpi->cpi_ncpu_per_chip,
3362 		    &cpi->cpi_ncore_per_chip);
3363 		break;
3364 	case X86_VENDOR_AMD:
3365 	case X86_VENDOR_HYGON:
3366 		cpuid_amd_ncores(cpi, &cpi->cpi_ncpu_per_chip,
3367 		    &cpi->cpi_ncore_per_chip);
3368 		break;
3369 	default:
3370 		/*
3371 		 * If we have some other x86 compatible chip, it's not clear how
3372 		 * they would behave. The most common case is virtualization
3373 		 * today, though there are also 64-bit VIA chips. Assume that
3374 		 * all we can get is the basic Leaf 1 HTT information.
3375 		 */
3376 		if ((cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_HTT) != 0) {
3377 			cpi->cpi_ncore_per_chip = 1;
3378 			cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
3379 		}
3380 		break;
3381 	}
3382 
3383 	/*
3384 	 * Based on the calculated number of threads and cores, potentially
3385 	 * assign the HTT and CMT features.
3386 	 */
3387 	if (cpi->cpi_ncore_per_chip > 1) {
3388 		add_x86_feature(featureset, X86FSET_CMP);
3389 	}
3390 
3391 	if (cpi->cpi_ncpu_per_chip > 1 &&
3392 	    cpi->cpi_ncpu_per_chip != cpi->cpi_ncore_per_chip) {
3393 		add_x86_feature(featureset, X86FSET_HTT);
3394 	}
3395 
3396 	/*
3397 	 * Now that has been set up, we need to go through and calculate all of
3398 	 * the rest of the parameters that exist. If we think the CPU doesn't
3399 	 * have either SMT (HTT) or CMP, then we basically go through and fake
3400 	 * up information in some way. The most likely case for this is
3401 	 * virtualization where we have a lot of partial topology information.
3402 	 */
3403 	if (!is_x86_feature(featureset, X86FSET_HTT) &&
3404 	    !is_x86_feature(featureset, X86FSET_CMP)) {
3405 		/*
3406 		 * This is a single core, single-threaded processor.
3407 		 */
3408 		cpi->cpi_procnodes_per_pkg = 1;
3409 		cpi->cpi_cores_per_compunit = 1;
3410 		cpi->cpi_compunitid = 0;
3411 		cpi->cpi_chipid = -1;
3412 		cpi->cpi_clogid = 0;
3413 		cpi->cpi_coreid = cpu->cpu_id;
3414 		cpi->cpi_pkgcoreid = 0;
3415 		if (cpi->cpi_vendor == X86_VENDOR_AMD ||
3416 		    cpi->cpi_vendor == X86_VENDOR_HYGON) {
3417 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
3418 		} else {
3419 			cpi->cpi_procnodeid = cpi->cpi_chipid;
3420 		}
3421 	} else {
3422 		switch (cpi->cpi_vendor) {
3423 		case X86_VENDOR_Intel:
3424 			cpuid_intel_getids(cpu, featureset);
3425 			break;
3426 		case X86_VENDOR_AMD:
3427 		case X86_VENDOR_HYGON:
3428 			cpuid_amd_getids(cpu, featureset);
3429 			break;
3430 		default:
3431 			/*
3432 			 * In this case, it's hard to say what we should do.
3433 			 * We're going to model them to the OS as single core
3434 			 * threads. We don't have a good identifier for them, so
3435 			 * we're just going to use the cpu id all on a single
3436 			 * chip.
3437 			 *
3438 			 * This case has historically been different from the
3439 			 * case above where we don't have HTT or CMP. While they
3440 			 * could be combined, we've opted to keep it separate to
3441 			 * minimize the risk of topology changes in weird cases.
3442 			 */
3443 			cpi->cpi_procnodes_per_pkg = 1;
3444 			cpi->cpi_cores_per_compunit = 1;
3445 			cpi->cpi_chipid = 0;
3446 			cpi->cpi_coreid = cpu->cpu_id;
3447 			cpi->cpi_clogid = cpu->cpu_id;
3448 			cpi->cpi_pkgcoreid = cpu->cpu_id;
3449 			cpi->cpi_procnodeid = cpi->cpi_chipid;
3450 			cpi->cpi_compunitid = cpi->cpi_coreid;
3451 			break;
3452 		}
3453 	}
3454 }
3455 
3456 /*
3457  * Gather relevant CPU features from leaf 6 which covers thermal information. We
3458  * always gather leaf 6 if it's supported; however, we only look for features on
3459  * Intel systems as AMD does not currently define any of the features we look
3460  * for below.
3461  */
3462 static void
3463 cpuid_basic_thermal(cpu_t *cpu, uchar_t *featureset)
3464 {
3465 	struct cpuid_regs *cp;
3466 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3467 
3468 	if (cpi->cpi_maxeax < 6) {
3469 		return;
3470 	}
3471 
3472 	cp = &cpi->cpi_std[6];
3473 	cp->cp_eax = 6;
3474 	cp->cp_ebx = cp->cp_ecx = cp->cp_edx = 0;
3475 	(void) __cpuid_insn(cp);
3476 	platform_cpuid_mangle(cpi->cpi_vendor, 6, cp);
3477 
3478 	if (cpi->cpi_vendor != X86_VENDOR_Intel) {
3479 		return;
3480 	}
3481 
3482 	if ((cp->cp_eax & CPUID_INTC_EAX_DTS) != 0) {
3483 		add_x86_feature(featureset, X86FSET_CORE_THERMAL);
3484 	}
3485 
3486 	if ((cp->cp_eax & CPUID_INTC_EAX_PTM) != 0) {
3487 		add_x86_feature(featureset, X86FSET_PKG_THERMAL);
3488 	}
3489 }
3490 
3491 /*
3492  * This is used when we discover that we have AVX support in cpuid. This
3493  * proceeds to scan for the rest of the AVX derived features.
3494  */
3495 static void
3496 cpuid_basic_avx(cpu_t *cpu, uchar_t *featureset)
3497 {
3498 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3499 
3500 	/*
3501 	 * If we don't have AVX, don't bother with most of this.
3502 	 */
3503 	if ((cpi->cpi_std[1].cp_ecx & CPUID_INTC_ECX_AVX) == 0)
3504 		return;
3505 
3506 	add_x86_feature(featureset, X86FSET_AVX);
3507 
3508 	/*
3509 	 * Intel says we can't check these without also
3510 	 * checking AVX.
3511 	 */
3512 	if (cpi->cpi_std[1].cp_ecx & CPUID_INTC_ECX_F16C)
3513 		add_x86_feature(featureset, X86FSET_F16C);
3514 
3515 	if (cpi->cpi_std[1].cp_ecx & CPUID_INTC_ECX_FMA)
3516 		add_x86_feature(featureset, X86FSET_FMA);
3517 
3518 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_BMI1)
3519 		add_x86_feature(featureset, X86FSET_BMI1);
3520 
3521 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_BMI2)
3522 		add_x86_feature(featureset, X86FSET_BMI2);
3523 
3524 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX2)
3525 		add_x86_feature(featureset, X86FSET_AVX2);
3526 
3527 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_VAES)
3528 		add_x86_feature(featureset, X86FSET_VAES);
3529 
3530 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_VPCLMULQDQ)
3531 		add_x86_feature(featureset, X86FSET_VPCLMULQDQ);
3532 
3533 	/*
3534 	 * The rest of the AVX features require AVX512. Do not check them unless
3535 	 * it is present.
3536 	 */
3537 	if ((cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512F) == 0)
3538 		return;
3539 	add_x86_feature(featureset, X86FSET_AVX512F);
3540 
3541 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512DQ)
3542 		add_x86_feature(featureset, X86FSET_AVX512DQ);
3543 
3544 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512IFMA)
3545 		add_x86_feature(featureset, X86FSET_AVX512FMA);
3546 
3547 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512PF)
3548 		add_x86_feature(featureset, X86FSET_AVX512PF);
3549 
3550 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512ER)
3551 		add_x86_feature(featureset, X86FSET_AVX512ER);
3552 
3553 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512CD)
3554 		add_x86_feature(featureset, X86FSET_AVX512CD);
3555 
3556 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512BW)
3557 		add_x86_feature(featureset, X86FSET_AVX512BW);
3558 
3559 	if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_AVX512VL)
3560 		add_x86_feature(featureset, X86FSET_AVX512VL);
3561 
3562 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_AVX512VBMI)
3563 		add_x86_feature(featureset, X86FSET_AVX512VBMI);
3564 
3565 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_AVX512VBMI2)
3566 		add_x86_feature(featureset, X86FSET_AVX512_VBMI2);
3567 
3568 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_AVX512VNNI)
3569 		add_x86_feature(featureset, X86FSET_AVX512VNNI);
3570 
3571 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_AVX512BITALG)
3572 		add_x86_feature(featureset, X86FSET_AVX512_BITALG);
3573 
3574 	if (cpi->cpi_std[7].cp_ecx & CPUID_INTC_ECX_7_0_AVX512VPOPCDQ)
3575 		add_x86_feature(featureset, X86FSET_AVX512VPOPCDQ);
3576 
3577 	if (cpi->cpi_std[7].cp_edx & CPUID_INTC_EDX_7_0_AVX5124NNIW)
3578 		add_x86_feature(featureset, X86FSET_AVX512NNIW);
3579 
3580 	if (cpi->cpi_std[7].cp_edx & CPUID_INTC_EDX_7_0_AVX5124FMAPS)
3581 		add_x86_feature(featureset, X86FSET_AVX512FMAPS);
3582 
3583 	/*
3584 	 * More features here are in Leaf 7, subleaf 1. Don't bother checking if
3585 	 * we don't need to.
3586 	 */
3587 	if (cpi->cpi_std[7].cp_eax < 1)
3588 		return;
3589 
3590 	if (cpi->cpi_sub7[0].cp_eax & CPUID_INTC_EAX_7_1_AVX512_BF16)
3591 		add_x86_feature(featureset, X86FSET_AVX512_BF16);
3592 }
3593 
3594 /*
3595  * PPIN is the protected processor inventory number. On AMD this is an actual
3596  * feature bit. However, on Intel systems we need to read the platform
3597  * information MSR if we're on a specific model.
3598  */
3599 #if !defined(__xpv)
3600 static void
3601 cpuid_basic_ppin(cpu_t *cpu, uchar_t *featureset)
3602 {
3603 	on_trap_data_t otd;
3604 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3605 
3606 	switch (cpi->cpi_vendor) {
3607 	case X86_VENDOR_AMD:
3608 		/*
3609 		 * This leaf will have already been gathered in the topology
3610 		 * functions.
3611 		 */
3612 		if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
3613 			if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PPIN) {
3614 				add_x86_feature(featureset, X86FSET_PPIN);
3615 			}
3616 		}
3617 		break;
3618 	case X86_VENDOR_Intel:
3619 		if (cpi->cpi_family != 6)
3620 			break;
3621 		switch (cpi->cpi_model) {
3622 		case INTC_MODEL_IVYBRIDGE_XEON:
3623 		case INTC_MODEL_HASWELL_XEON:
3624 		case INTC_MODEL_BROADWELL_XEON:
3625 		case INTC_MODEL_BROADWELL_XEON_D:
3626 		case INTC_MODEL_SKYLAKE_XEON:
3627 		case INTC_MODEL_ICELAKE_XEON:
3628 			if (!on_trap(&otd, OT_DATA_ACCESS)) {
3629 				uint64_t value;
3630 
3631 				value = rdmsr(MSR_PLATFORM_INFO);
3632 				if ((value & MSR_PLATFORM_INFO_PPIN) != 0) {
3633 					add_x86_feature(featureset,
3634 					    X86FSET_PPIN);
3635 				}
3636 			}
3637 			no_trap();
3638 			break;
3639 		default:
3640 			break;
3641 		}
3642 		break;
3643 	default:
3644 		break;
3645 	}
3646 }
3647 #endif	/* ! __xpv */
3648 
3649 static void
3650 cpuid_pass_prelude(cpu_t *cpu, void *arg)
3651 {
3652 	uchar_t *featureset = (uchar_t *)arg;
3653 
3654 	/*
3655 	 * We don't run on any processor that doesn't have cpuid, and could not
3656 	 * possibly have arrived here.
3657 	 */
3658 	add_x86_feature(featureset, X86FSET_CPUID);
3659 }
3660 
3661 static void
3662 cpuid_pass_ident(cpu_t *cpu, void *arg __unused)
3663 {
3664 	struct cpuid_info *cpi;
3665 	struct cpuid_regs *cp;
3666 
3667 	/*
3668 	 * We require that virtual/native detection be complete and that PCI
3669 	 * config space access has been set up; at present there is no reliable
3670 	 * way to determine the latter.
3671 	 */
3672 #if !defined(__xpv)
3673 	ASSERT3S(platform_type, !=, -1);
3674 #endif	/* !__xpv */
3675 
3676 	cpi = cpu->cpu_m.mcpu_cpi;
3677 	ASSERT(cpi != NULL);
3678 
3679 	cp = &cpi->cpi_std[0];
3680 	cp->cp_eax = 0;
3681 	cpi->cpi_maxeax = __cpuid_insn(cp);
3682 	{
3683 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
3684 		*iptr++ = cp->cp_ebx;
3685 		*iptr++ = cp->cp_edx;
3686 		*iptr++ = cp->cp_ecx;
3687 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
3688 	}
3689 
3690 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
3691 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
3692 
3693 	/*
3694 	 * Limit the range in case of weird hardware
3695 	 */
3696 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
3697 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
3698 	if (cpi->cpi_maxeax < 1)
3699 		return;
3700 
3701 	cp = &cpi->cpi_std[1];
3702 	cp->cp_eax = 1;
3703 	(void) __cpuid_insn(cp);
3704 
3705 	/*
3706 	 * Extract identifying constants for easy access.
3707 	 */
3708 	cpi->cpi_model = CPI_MODEL(cpi);
3709 	cpi->cpi_family = CPI_FAMILY(cpi);
3710 
3711 	if (cpi->cpi_family == 0xf)
3712 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
3713 
3714 	/*
3715 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
3716 	 * Intel, and presumably everyone else, uses model == 0xf, as
3717 	 * one would expect (max value means possible overflow).  Sigh.
3718 	 */
3719 
3720 	switch (cpi->cpi_vendor) {
3721 	case X86_VENDOR_Intel:
3722 		if (IS_EXTENDED_MODEL_INTEL(cpi))
3723 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
3724 		break;
3725 	case X86_VENDOR_AMD:
3726 		if (CPI_FAMILY(cpi) == 0xf)
3727 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
3728 		break;
3729 	case X86_VENDOR_HYGON:
3730 		cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
3731 		break;
3732 	default:
3733 		if (cpi->cpi_model == 0xf)
3734 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
3735 		break;
3736 	}
3737 
3738 	cpi->cpi_step = CPI_STEP(cpi);
3739 	cpi->cpi_brandid = CPI_BRANDID(cpi);
3740 
3741 	/*
3742 	 * Synthesize chip "revision" and socket type
3743 	 */
3744 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
3745 	    cpi->cpi_model, cpi->cpi_step);
3746 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
3747 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
3748 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
3749 	    cpi->cpi_model, cpi->cpi_step);
3750 	cpi->cpi_uarchrev = _cpuid_uarchrev(cpi->cpi_vendor, cpi->cpi_family,
3751 	    cpi->cpi_model, cpi->cpi_step);
3752 }
3753 
3754 static void
3755 cpuid_pass_basic(cpu_t *cpu, void *arg)
3756 {
3757 	uchar_t *featureset = (uchar_t *)arg;
3758 	uint32_t mask_ecx, mask_edx;
3759 	struct cpuid_info *cpi;
3760 	struct cpuid_regs *cp;
3761 	int xcpuid;
3762 #if !defined(__xpv)
3763 	extern int idle_cpu_prefer_mwait;
3764 #endif
3765 
3766 	cpi = cpu->cpu_m.mcpu_cpi;
3767 	ASSERT(cpi != NULL);
3768 
3769 	if (cpi->cpi_maxeax < 1)
3770 		return;
3771 
3772 	/*
3773 	 * This was filled during the identification pass.
3774 	 */
3775 	cp = &cpi->cpi_std[1];
3776 
3777 	/*
3778 	 * *default* assumptions:
3779 	 * - believe %edx feature word
3780 	 * - ignore %ecx feature word
3781 	 * - 32-bit virtual and physical addressing
3782 	 */
3783 	mask_edx = 0xffffffff;
3784 	mask_ecx = 0;
3785 
3786 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
3787 
3788 	switch (cpi->cpi_vendor) {
3789 	case X86_VENDOR_Intel:
3790 		if (cpi->cpi_family == 5)
3791 			x86_type = X86_TYPE_P5;
3792 		else if (IS_LEGACY_P6(cpi)) {
3793 			x86_type = X86_TYPE_P6;
3794 			pentiumpro_bug4046376 = 1;
3795 			/*
3796 			 * Clear the SEP bit when it was set erroneously
3797 			 */
3798 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
3799 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
3800 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
3801 			x86_type = X86_TYPE_P4;
3802 			/*
3803 			 * We don't currently depend on any of the %ecx
3804 			 * features until Prescott, so we'll only check
3805 			 * this from P4 onwards.  We might want to revisit
3806 			 * that idea later.
3807 			 */
3808 			mask_ecx = 0xffffffff;
3809 		} else if (cpi->cpi_family > 0xf)
3810 			mask_ecx = 0xffffffff;
3811 		/*
3812 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
3813 		 * to obtain the monitor linesize.
3814 		 */
3815 		if (cpi->cpi_maxeax < 5)
3816 			mask_ecx &= ~CPUID_INTC_ECX_MON;
3817 		break;
3818 	case X86_VENDOR_IntelClone:
3819 	default:
3820 		break;
3821 	case X86_VENDOR_AMD:
3822 #if defined(OPTERON_ERRATUM_108)
3823 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
3824 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
3825 			cpi->cpi_model = 0xc;
3826 		} else
3827 #endif
3828 		if (cpi->cpi_family == 5) {
3829 			/*
3830 			 * AMD K5 and K6
3831 			 *
3832 			 * These CPUs have an incomplete implementation
3833 			 * of MCA/MCE which we mask away.
3834 			 */
3835 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
3836 
3837 			/*
3838 			 * Model 0 uses the wrong (APIC) bit
3839 			 * to indicate PGE.  Fix it here.
3840 			 */
3841 			if (cpi->cpi_model == 0) {
3842 				if (cp->cp_edx & 0x200) {
3843 					cp->cp_edx &= ~0x200;
3844 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
3845 				}
3846 			}
3847 
3848 			/*
3849 			 * Early models had problems w/ MMX; disable.
3850 			 */
3851 			if (cpi->cpi_model < 6)
3852 				mask_edx &= ~CPUID_INTC_EDX_MMX;
3853 		}
3854 
3855 		/*
3856 		 * For newer families, SSE3 and CX16, at least, are valid;
3857 		 * enable all
3858 		 */
3859 		if (cpi->cpi_family >= 0xf)
3860 			mask_ecx = 0xffffffff;
3861 		/*
3862 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
3863 		 * to obtain the monitor linesize.
3864 		 */
3865 		if (cpi->cpi_maxeax < 5)
3866 			mask_ecx &= ~CPUID_INTC_ECX_MON;
3867 
3868 #if !defined(__xpv)
3869 		/*
3870 		 * AMD has not historically used MWAIT in the CPU's idle loop.
3871 		 * Pre-family-10h Opterons do not have the MWAIT instruction. We
3872 		 * know for certain that in at least family 17h, per AMD, mwait
3873 		 * is preferred. Families in-between are less certain.
3874 		 */
3875 		if (cpi->cpi_family < 0x17) {
3876 			idle_cpu_prefer_mwait = 0;
3877 		}
3878 #endif
3879 
3880 		break;
3881 	case X86_VENDOR_HYGON:
3882 		/* Enable all for Hygon Dhyana CPU */
3883 		mask_ecx = 0xffffffff;
3884 		break;
3885 	case X86_VENDOR_TM:
3886 		/*
3887 		 * workaround the NT workaround in CMS 4.1
3888 		 */
3889 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
3890 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
3891 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
3892 		break;
3893 	case X86_VENDOR_Centaur:
3894 		/*
3895 		 * workaround the NT workarounds again
3896 		 */
3897 		if (cpi->cpi_family == 6)
3898 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
3899 		break;
3900 	case X86_VENDOR_Cyrix:
3901 		/*
3902 		 * We rely heavily on the probing in locore
3903 		 * to actually figure out what parts, if any,
3904 		 * of the Cyrix cpuid instruction to believe.
3905 		 */
3906 		switch (x86_type) {
3907 		case X86_TYPE_CYRIX_486:
3908 			mask_edx = 0;
3909 			break;
3910 		case X86_TYPE_CYRIX_6x86:
3911 			mask_edx = 0;
3912 			break;
3913 		case X86_TYPE_CYRIX_6x86L:
3914 			mask_edx =
3915 			    CPUID_INTC_EDX_DE |
3916 			    CPUID_INTC_EDX_CX8;
3917 			break;
3918 		case X86_TYPE_CYRIX_6x86MX:
3919 			mask_edx =
3920 			    CPUID_INTC_EDX_DE |
3921 			    CPUID_INTC_EDX_MSR |
3922 			    CPUID_INTC_EDX_CX8 |
3923 			    CPUID_INTC_EDX_PGE |
3924 			    CPUID_INTC_EDX_CMOV |
3925 			    CPUID_INTC_EDX_MMX;
3926 			break;
3927 		case X86_TYPE_CYRIX_GXm:
3928 			mask_edx =
3929 			    CPUID_INTC_EDX_MSR |
3930 			    CPUID_INTC_EDX_CX8 |
3931 			    CPUID_INTC_EDX_CMOV |
3932 			    CPUID_INTC_EDX_MMX;
3933 			break;
3934 		case X86_TYPE_CYRIX_MediaGX:
3935 			break;
3936 		case X86_TYPE_CYRIX_MII:
3937 		case X86_TYPE_VIA_CYRIX_III:
3938 			mask_edx =
3939 			    CPUID_INTC_EDX_DE |
3940 			    CPUID_INTC_EDX_TSC |
3941 			    CPUID_INTC_EDX_MSR |
3942 			    CPUID_INTC_EDX_CX8 |
3943 			    CPUID_INTC_EDX_PGE |
3944 			    CPUID_INTC_EDX_CMOV |
3945 			    CPUID_INTC_EDX_MMX;
3946 			break;
3947 		default:
3948 			break;
3949 		}
3950 		break;
3951 	}
3952 
3953 #if defined(__xpv)
3954 	/*
3955 	 * Do not support MONITOR/MWAIT under a hypervisor
3956 	 */
3957 	mask_ecx &= ~CPUID_INTC_ECX_MON;
3958 	/*
3959 	 * Do not support XSAVE under a hypervisor for now
3960 	 */
3961 	xsave_force_disable = B_TRUE;
3962 
3963 #endif	/* __xpv */
3964 
3965 	if (xsave_force_disable) {
3966 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
3967 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
3968 		mask_ecx &= ~CPUID_INTC_ECX_F16C;
3969 		mask_ecx &= ~CPUID_INTC_ECX_FMA;
3970 	}
3971 
3972 	/*
3973 	 * Now we've figured out the masks that determine
3974 	 * which bits we choose to believe, apply the masks
3975 	 * to the feature words, then map the kernel's view
3976 	 * of these feature words into its feature word.
3977 	 */
3978 	cp->cp_edx &= mask_edx;
3979 	cp->cp_ecx &= mask_ecx;
3980 
3981 	/*
3982 	 * apply any platform restrictions (we don't call this
3983 	 * immediately after __cpuid_insn here, because we need the
3984 	 * workarounds applied above first)
3985 	 */
3986 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
3987 
3988 	/*
3989 	 * In addition to ecx and edx, Intel and AMD are storing a bunch of
3990 	 * instruction set extensions in leaf 7's ebx, ecx, and edx. Note, leaf
3991 	 * 7 has sub-leaves determined by ecx.
3992 	 */
3993 	if (cpi->cpi_maxeax >= 7) {
3994 		struct cpuid_regs *ecp;
3995 		ecp = &cpi->cpi_std[7];
3996 		ecp->cp_eax = 7;
3997 		ecp->cp_ecx = 0;
3998 		(void) __cpuid_insn(ecp);
3999 
4000 		/*
4001 		 * If XSAVE has been disabled, just ignore all of the
4002 		 * extended-save-area dependent flags here. By removing most of
4003 		 * the leaf 7, sub-leaf 0 flags, that will ensure tha we don't
4004 		 * end up looking at additional xsave dependent leaves right
4005 		 * now.
4006 		 */
4007 		if (xsave_force_disable) {
4008 			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
4009 			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
4010 			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
4011 			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_MPX;
4012 			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_ALL_AVX512;
4013 			ecp->cp_ecx &= ~CPUID_INTC_ECX_7_0_ALL_AVX512;
4014 			ecp->cp_edx &= ~CPUID_INTC_EDX_7_0_ALL_AVX512;
4015 			ecp->cp_ecx &= ~CPUID_INTC_ECX_7_0_VAES;
4016 			ecp->cp_ecx &= ~CPUID_INTC_ECX_7_0_VPCLMULQDQ;
4017 			ecp->cp_ecx &= ~CPUID_INTC_ECX_7_0_GFNI;
4018 		}
4019 
4020 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP)
4021 			add_x86_feature(featureset, X86FSET_SMEP);
4022 
4023 		/*
4024 		 * We check disable_smap here in addition to in startup_smap()
4025 		 * to ensure CPUs that aren't the boot CPU don't accidentally
4026 		 * include it in the feature set and thus generate a mismatched
4027 		 * x86 feature set across CPUs.
4028 		 */
4029 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMAP &&
4030 		    disable_smap == 0)
4031 			add_x86_feature(featureset, X86FSET_SMAP);
4032 
4033 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_RDSEED)
4034 			add_x86_feature(featureset, X86FSET_RDSEED);
4035 
4036 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_ADX)
4037 			add_x86_feature(featureset, X86FSET_ADX);
4038 
4039 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_FSGSBASE)
4040 			add_x86_feature(featureset, X86FSET_FSGSBASE);
4041 
4042 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_CLFLUSHOPT)
4043 			add_x86_feature(featureset, X86FSET_CLFLUSHOPT);
4044 
4045 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_INVPCID)
4046 			add_x86_feature(featureset, X86FSET_INVPCID);
4047 
4048 		if (ecp->cp_ecx & CPUID_INTC_ECX_7_0_UMIP)
4049 			add_x86_feature(featureset, X86FSET_UMIP);
4050 		if (ecp->cp_ecx & CPUID_INTC_ECX_7_0_PKU)
4051 			add_x86_feature(featureset, X86FSET_PKU);
4052 		if (ecp->cp_ecx & CPUID_INTC_ECX_7_0_OSPKE)
4053 			add_x86_feature(featureset, X86FSET_OSPKE);
4054 		if (ecp->cp_ecx & CPUID_INTC_ECX_7_0_GFNI)
4055 			add_x86_feature(featureset, X86FSET_GFNI);
4056 
4057 		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_CLWB)
4058 			add_x86_feature(featureset, X86FSET_CLWB);
4059 
4060 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
4061 			if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_MPX)
4062 				add_x86_feature(featureset, X86FSET_MPX);
4063 		}
4064 
4065 		/*
4066 		 * If we have subleaf 1 available, grab and store that. This is
4067 		 * used for more AVX and related features.
4068 		 */
4069 		if (ecp->cp_eax >= 1) {
4070 			struct cpuid_regs *c71;
4071 			c71 = &cpi->cpi_sub7[0];
4072 			c71->cp_eax = 7;
4073 			c71->cp_ecx = 1;
4074 			(void) __cpuid_insn(c71);
4075 		}
4076 	}
4077 
4078 	/*
4079 	 * fold in overrides from the "eeprom" mechanism
4080 	 */
4081 	cp->cp_edx |= cpuid_feature_edx_include;
4082 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
4083 
4084 	cp->cp_ecx |= cpuid_feature_ecx_include;
4085 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
4086 
4087 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
4088 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
4089 	}
4090 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
4091 		add_x86_feature(featureset, X86FSET_TSC);
4092 	}
4093 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
4094 		add_x86_feature(featureset, X86FSET_MSR);
4095 	}
4096 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
4097 		add_x86_feature(featureset, X86FSET_MTRR);
4098 	}
4099 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
4100 		add_x86_feature(featureset, X86FSET_PGE);
4101 	}
4102 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
4103 		add_x86_feature(featureset, X86FSET_CMOV);
4104 	}
4105 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
4106 		add_x86_feature(featureset, X86FSET_MMX);
4107 	}
4108 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
4109 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
4110 		add_x86_feature(featureset, X86FSET_MCA);
4111 	}
4112 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
4113 		add_x86_feature(featureset, X86FSET_PAE);
4114 	}
4115 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
4116 		add_x86_feature(featureset, X86FSET_CX8);
4117 	}
4118 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
4119 		add_x86_feature(featureset, X86FSET_CX16);
4120 	}
4121 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
4122 		add_x86_feature(featureset, X86FSET_PAT);
4123 	}
4124 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
4125 		add_x86_feature(featureset, X86FSET_SEP);
4126 	}
4127 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
4128 		/*
4129 		 * In our implementation, fxsave/fxrstor
4130 		 * are prerequisites before we'll even
4131 		 * try and do SSE things.
4132 		 */
4133 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
4134 			add_x86_feature(featureset, X86FSET_SSE);
4135 		}
4136 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
4137 			add_x86_feature(featureset, X86FSET_SSE2);
4138 		}
4139 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
4140 			add_x86_feature(featureset, X86FSET_SSE3);
4141 		}
4142 		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
4143 			add_x86_feature(featureset, X86FSET_SSSE3);
4144 		}
4145 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
4146 			add_x86_feature(featureset, X86FSET_SSE4_1);
4147 		}
4148 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
4149 			add_x86_feature(featureset, X86FSET_SSE4_2);
4150 		}
4151 		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
4152 			add_x86_feature(featureset, X86FSET_AES);
4153 		}
4154 		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
4155 			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
4156 		}
4157 
4158 		if (cpi->cpi_std[7].cp_ebx & CPUID_INTC_EBX_7_0_SHA)
4159 			add_x86_feature(featureset, X86FSET_SHA);
4160 
4161 		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
4162 			add_x86_feature(featureset, X86FSET_XSAVE);
4163 
4164 			/* We only test AVX & AVX512 when there is XSAVE */
4165 			cpuid_basic_avx(cpu, featureset);
4166 		}
4167 	}
4168 
4169 	if (cp->cp_ecx & CPUID_INTC_ECX_PCID) {
4170 		add_x86_feature(featureset, X86FSET_PCID);
4171 	}
4172 
4173 	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
4174 		add_x86_feature(featureset, X86FSET_X2APIC);
4175 	}
4176 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
4177 		add_x86_feature(featureset, X86FSET_DE);
4178 	}
4179 #if !defined(__xpv)
4180 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
4181 
4182 		/*
4183 		 * We require the CLFLUSH instruction for erratum workaround
4184 		 * to use MONITOR/MWAIT.
4185 		 */
4186 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
4187 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
4188 			add_x86_feature(featureset, X86FSET_MWAIT);
4189 		} else {
4190 			extern int idle_cpu_assert_cflush_monitor;
4191 
4192 			/*
4193 			 * All processors we are aware of which have
4194 			 * MONITOR/MWAIT also have CLFLUSH.
4195 			 */
4196 			if (idle_cpu_assert_cflush_monitor) {
4197 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
4198 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
4199 			}
4200 		}
4201 	}
4202 #endif	/* __xpv */
4203 
4204 	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
4205 		add_x86_feature(featureset, X86FSET_VMX);
4206 	}
4207 
4208 	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
4209 		add_x86_feature(featureset, X86FSET_RDRAND);
4210 
4211 	/*
4212 	 * Only need it first time, rest of the cpus would follow suit.
4213 	 * we only capture this for the bootcpu.
4214 	 */
4215 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
4216 		add_x86_feature(featureset, X86FSET_CLFSH);
4217 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
4218 	}
4219 	if (is_x86_feature(featureset, X86FSET_PAE))
4220 		cpi->cpi_pabits = 36;
4221 
4222 	if (cpi->cpi_maxeax >= 0xD && !xsave_force_disable) {
4223 		struct cpuid_regs r, *ecp;
4224 
4225 		ecp = &r;
4226 		ecp->cp_eax = 0xD;
4227 		ecp->cp_ecx = 1;
4228 		ecp->cp_edx = ecp->cp_ebx = 0;
4229 		(void) __cpuid_insn(ecp);
4230 
4231 		if (ecp->cp_eax & CPUID_INTC_EAX_D_1_XSAVEOPT)
4232 			add_x86_feature(featureset, X86FSET_XSAVEOPT);
4233 		if (ecp->cp_eax & CPUID_INTC_EAX_D_1_XSAVEC)
4234 			add_x86_feature(featureset, X86FSET_XSAVEC);
4235 		if (ecp->cp_eax & CPUID_INTC_EAX_D_1_XSAVES)
4236 			add_x86_feature(featureset, X86FSET_XSAVES);
4237 
4238 		/*
4239 		 * Zen 2 family processors suffer from erratum 1386 that causes
4240 		 * xsaves to not function correctly in some circumstances. There
4241 		 * are no supervisor states in Zen 2 and earlier. Practically
4242 		 * speaking this has no impact for us as we currently do not
4243 		 * leverage compressed xsave formats. To safeguard against
4244 		 * issues in the future where we may opt to using it, we remove
4245 		 * it from the feature set now. While Matisse has a microcode
4246 		 * update available with a fix, not all Zen 2 CPUs do so it's
4247 		 * simpler for the moment to unconditionally remove it.
4248 		 */
4249 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
4250 		    uarchrev_uarch(cpi->cpi_uarchrev) <= X86_UARCH_AMD_ZEN2) {
4251 			remove_x86_feature(featureset, X86FSET_XSAVES);
4252 		}
4253 	}
4254 
4255 	/*
4256 	 * Work on the "extended" feature information, doing
4257 	 * some basic initialization to be used in the extended pass.
4258 	 */
4259 	xcpuid = 0;
4260 	switch (cpi->cpi_vendor) {
4261 	case X86_VENDOR_Intel:
4262 		/*
4263 		 * On KVM we know we will have proper support for extended
4264 		 * cpuid.
4265 		 */
4266 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf ||
4267 		    (get_hwenv() == HW_KVM && cpi->cpi_family == 6 &&
4268 		    (cpi->cpi_model == 6 || cpi->cpi_model == 2)))
4269 			xcpuid++;
4270 		break;
4271 	case X86_VENDOR_AMD:
4272 		if (cpi->cpi_family > 5 ||
4273 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
4274 			xcpuid++;
4275 		break;
4276 	case X86_VENDOR_Cyrix:
4277 		/*
4278 		 * Only these Cyrix CPUs are -known- to support
4279 		 * extended cpuid operations.
4280 		 */
4281 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
4282 		    x86_type == X86_TYPE_CYRIX_GXm)
4283 			xcpuid++;
4284 		break;
4285 	case X86_VENDOR_HYGON:
4286 	case X86_VENDOR_Centaur:
4287 	case X86_VENDOR_TM:
4288 	default:
4289 		xcpuid++;
4290 		break;
4291 	}
4292 
4293 	if (xcpuid) {
4294 		cp = &cpi->cpi_extd[0];
4295 		cp->cp_eax = CPUID_LEAF_EXT_0;
4296 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
4297 	}
4298 
4299 	if (cpi->cpi_xmaxeax & CPUID_LEAF_EXT_0) {
4300 
4301 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
4302 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
4303 
4304 		switch (cpi->cpi_vendor) {
4305 		case X86_VENDOR_Intel:
4306 		case X86_VENDOR_AMD:
4307 		case X86_VENDOR_HYGON:
4308 			if (cpi->cpi_xmaxeax < 0x80000001)
4309 				break;
4310 			cp = &cpi->cpi_extd[1];
4311 			cp->cp_eax = 0x80000001;
4312 			(void) __cpuid_insn(cp);
4313 
4314 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
4315 			    cpi->cpi_family == 5 &&
4316 			    cpi->cpi_model == 6 &&
4317 			    cpi->cpi_step == 6) {
4318 				/*
4319 				 * K6 model 6 uses bit 10 to indicate SYSC
4320 				 * Later models use bit 11. Fix it here.
4321 				 */
4322 				if (cp->cp_edx & 0x400) {
4323 					cp->cp_edx &= ~0x400;
4324 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
4325 				}
4326 			}
4327 
4328 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
4329 
4330 			/*
4331 			 * Compute the additions to the kernel's feature word.
4332 			 */
4333 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
4334 				add_x86_feature(featureset, X86FSET_NX);
4335 			}
4336 
4337 			/*
4338 			 * Regardless whether or not we boot 64-bit,
4339 			 * we should have a way to identify whether
4340 			 * the CPU is capable of running 64-bit.
4341 			 */
4342 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
4343 				add_x86_feature(featureset, X86FSET_64);
4344 			}
4345 
4346 			/* 1 GB large page - enable only for 64 bit kernel */
4347 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
4348 				add_x86_feature(featureset, X86FSET_1GPG);
4349 			}
4350 
4351 			if ((cpi->cpi_vendor == X86_VENDOR_AMD ||
4352 			    cpi->cpi_vendor == X86_VENDOR_HYGON) &&
4353 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
4354 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
4355 				add_x86_feature(featureset, X86FSET_SSE4A);
4356 			}
4357 
4358 			/*
4359 			 * It's really tricky to support syscall/sysret in
4360 			 * the i386 kernel; we rely on sysenter/sysexit
4361 			 * instead.  In the amd64 kernel, things are -way-
4362 			 * better.
4363 			 */
4364 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
4365 				add_x86_feature(featureset, X86FSET_ASYSC);
4366 			}
4367 
4368 			/*
4369 			 * While we're thinking about system calls, note
4370 			 * that AMD processors don't support sysenter
4371 			 * in long mode at all, so don't try to program them.
4372 			 */
4373 			if (x86_vendor == X86_VENDOR_AMD ||
4374 			    x86_vendor == X86_VENDOR_HYGON) {
4375 				remove_x86_feature(featureset, X86FSET_SEP);
4376 			}
4377 
4378 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
4379 				add_x86_feature(featureset, X86FSET_TSCP);
4380 			}
4381 
4382 			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
4383 				add_x86_feature(featureset, X86FSET_SVM);
4384 			}
4385 
4386 			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
4387 				add_x86_feature(featureset, X86FSET_TOPOEXT);
4388 			}
4389 
4390 			if (cp->cp_ecx & CPUID_AMD_ECX_PCEC) {
4391 				add_x86_feature(featureset, X86FSET_AMD_PCEC);
4392 			}
4393 
4394 			if (cp->cp_ecx & CPUID_AMD_ECX_XOP) {
4395 				add_x86_feature(featureset, X86FSET_XOP);
4396 			}
4397 
4398 			if (cp->cp_ecx & CPUID_AMD_ECX_FMA4) {
4399 				add_x86_feature(featureset, X86FSET_FMA4);
4400 			}
4401 
4402 			if (cp->cp_ecx & CPUID_AMD_ECX_TBM) {
4403 				add_x86_feature(featureset, X86FSET_TBM);
4404 			}
4405 
4406 			if (cp->cp_ecx & CPUID_AMD_ECX_MONITORX) {
4407 				add_x86_feature(featureset, X86FSET_MONITORX);
4408 			}
4409 			break;
4410 		default:
4411 			break;
4412 		}
4413 
4414 		/*
4415 		 * Get CPUID data about processor cores and hyperthreads.
4416 		 */
4417 		switch (cpi->cpi_vendor) {
4418 		case X86_VENDOR_Intel:
4419 			if (cpi->cpi_maxeax >= 4) {
4420 				cp = &cpi->cpi_std[4];
4421 				cp->cp_eax = 4;
4422 				cp->cp_ecx = 0;
4423 				(void) __cpuid_insn(cp);
4424 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
4425 			}
4426 			/*FALLTHROUGH*/
4427 		case X86_VENDOR_AMD:
4428 		case X86_VENDOR_HYGON:
4429 			if (cpi->cpi_xmaxeax < CPUID_LEAF_EXT_8)
4430 				break;
4431 			cp = &cpi->cpi_extd[8];
4432 			cp->cp_eax = CPUID_LEAF_EXT_8;
4433 			(void) __cpuid_insn(cp);
4434 			platform_cpuid_mangle(cpi->cpi_vendor, CPUID_LEAF_EXT_8,
4435 			    cp);
4436 
4437 			/*
4438 			 * AMD uses ebx for some extended functions.
4439 			 */
4440 			if (cpi->cpi_vendor == X86_VENDOR_AMD ||
4441 			    cpi->cpi_vendor == X86_VENDOR_HYGON) {
4442 				/*
4443 				 * While we're here, check for the AMD "Error
4444 				 * Pointer Zero/Restore" feature. This can be
4445 				 * used to setup the FP save handlers
4446 				 * appropriately.
4447 				 */
4448 				if (cp->cp_ebx & CPUID_AMD_EBX_ERR_PTR_ZERO) {
4449 					cpi->cpi_fp_amd_save = 0;
4450 				} else {
4451 					cpi->cpi_fp_amd_save = 1;
4452 				}
4453 
4454 				if (cp->cp_ebx & CPUID_AMD_EBX_CLZERO) {
4455 					add_x86_feature(featureset,
4456 					    X86FSET_CLZERO);
4457 				}
4458 			}
4459 
4460 			/*
4461 			 * Virtual and physical address limits from
4462 			 * cpuid override previously guessed values.
4463 			 */
4464 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
4465 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
4466 			break;
4467 		default:
4468 			break;
4469 		}
4470 
4471 		/*
4472 		 * Get CPUID data about TSC Invariance in Deep C-State.
4473 		 */
4474 		switch (cpi->cpi_vendor) {
4475 		case X86_VENDOR_Intel:
4476 		case X86_VENDOR_AMD:
4477 		case X86_VENDOR_HYGON:
4478 			if (cpi->cpi_maxeax >= 7) {
4479 				cp = &cpi->cpi_extd[7];
4480 				cp->cp_eax = 0x80000007;
4481 				cp->cp_ecx = 0;
4482 				(void) __cpuid_insn(cp);
4483 			}
4484 			break;
4485 		default:
4486 			break;
4487 		}
4488 	}
4489 
4490 	/*
4491 	 * cpuid_basic_ppin assumes that cpuid_basic_topology has already been
4492 	 * run and thus gathered some of its dependent leaves.
4493 	 */
4494 	cpuid_basic_topology(cpu, featureset);
4495 	cpuid_basic_thermal(cpu, featureset);
4496 #if !defined(__xpv)
4497 	cpuid_basic_ppin(cpu, featureset);
4498 #endif
4499 
4500 	if (cpi->cpi_vendor == X86_VENDOR_AMD ||
4501 	    cpi->cpi_vendor == X86_VENDOR_HYGON) {
4502 		if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8 &&
4503 		    cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_ERR_PTR_ZERO) {
4504 			/* Special handling for AMD FP not necessary. */
4505 			cpi->cpi_fp_amd_save = 0;
4506 		} else {
4507 			cpi->cpi_fp_amd_save = 1;
4508 		}
4509 	}
4510 
4511 	/*
4512 	 * Check (and potentially set) if lfence is serializing.
4513 	 * This is useful for accurate rdtsc measurements and AMD retpolines.
4514 	 */
4515 	if ((cpi->cpi_vendor == X86_VENDOR_AMD ||
4516 	    cpi->cpi_vendor == X86_VENDOR_HYGON) &&
4517 	    is_x86_feature(featureset, X86FSET_SSE2)) {
4518 		/*
4519 		 * The AMD white paper Software Techniques For Managing
4520 		 * Speculation on AMD Processors details circumstances for when
4521 		 * lfence instructions are serializing.
4522 		 *
4523 		 * On family 0xf and 0x11, it is inherently so.  On family 0x10
4524 		 * and later (excluding 0x11), a bit in the DE_CFG MSR
4525 		 * determines the lfence behavior.  Per that whitepaper, AMD has
4526 		 * committed to supporting that MSR on all later CPUs.
4527 		 */
4528 		if (cpi->cpi_family == 0xf || cpi->cpi_family == 0x11) {
4529 			add_x86_feature(featureset, X86FSET_LFENCE_SER);
4530 		} else if (cpi->cpi_family >= 0x10) {
4531 #if !defined(__xpv)
4532 			uint64_t val;
4533 
4534 			/*
4535 			 * Be careful when attempting to enable the bit, and
4536 			 * verify that it was actually set in case we are
4537 			 * running in a hypervisor which is less than faithful
4538 			 * about its emulation of this feature.
4539 			 */
4540 			on_trap_data_t otd;
4541 			if (!on_trap(&otd, OT_DATA_ACCESS)) {
4542 				val = rdmsr(MSR_AMD_DE_CFG);
4543 				val |= AMD_DE_CFG_LFENCE_DISPATCH;
4544 				wrmsr(MSR_AMD_DE_CFG, val);
4545 				val = rdmsr(MSR_AMD_DE_CFG);
4546 			} else {
4547 				val = 0;
4548 			}
4549 			no_trap();
4550 
4551 			if ((val & AMD_DE_CFG_LFENCE_DISPATCH) != 0) {
4552 				add_x86_feature(featureset, X86FSET_LFENCE_SER);
4553 			}
4554 #endif
4555 		}
4556 	} else if (cpi->cpi_vendor == X86_VENDOR_Intel &&
4557 	    is_x86_feature(featureset, X86FSET_SSE2)) {
4558 		/*
4559 		 * Documentation and other OSes indicate that lfence is always
4560 		 * serializing on Intel CPUs.
4561 		 */
4562 		add_x86_feature(featureset, X86FSET_LFENCE_SER);
4563 	}
4564 
4565 
4566 	/*
4567 	 * Check the processor leaves that are used for security features. Grab
4568 	 * any additional processor-specific leaves that we may not have yet.
4569 	 */
4570 	switch (cpi->cpi_vendor) {
4571 	case X86_VENDOR_AMD:
4572 	case X86_VENDOR_HYGON:
4573 		if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_21) {
4574 			cp = &cpi->cpi_extd[7];
4575 			cp->cp_eax = CPUID_LEAF_EXT_21;
4576 			cp->cp_ecx = 0;
4577 			(void) __cpuid_insn(cp);
4578 		}
4579 		break;
4580 	default:
4581 		break;
4582 	}
4583 
4584 	cpuid_scan_security(cpu, featureset);
4585 }
4586 
4587 /*
4588  * Make copies of the cpuid table entries we depend on, in
4589  * part for ease of parsing now, in part so that we have only
4590  * one place to correct any of it, in part for ease of
4591  * later export to userland, and in part so we can look at
4592  * this stuff in a crash dump.
4593  */
4594 
4595 static void
4596 cpuid_pass_extended(cpu_t *cpu, void *_arg __unused)
4597 {
4598 	uint_t n, nmax;
4599 	int i;
4600 	struct cpuid_regs *cp;
4601 	uint8_t *dp;
4602 	uint32_t *iptr;
4603 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4604 
4605 	if (cpi->cpi_maxeax < 1)
4606 		return;
4607 
4608 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
4609 		nmax = NMAX_CPI_STD;
4610 	/*
4611 	 * (We already handled n == 0 and n == 1 in the basic pass)
4612 	 */
4613 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
4614 		/*
4615 		 * leaves 6 and 7 were handled in the basic pass
4616 		 */
4617 		if (n == 6 || n == 7)
4618 			continue;
4619 
4620 		cp->cp_eax = n;
4621 
4622 		/*
4623 		 * CPUID function 4 expects %ecx to be initialized
4624 		 * with an index which indicates which cache to return
4625 		 * information about. The OS is expected to call function 4
4626 		 * with %ecx set to 0, 1, 2, ... until it returns with
4627 		 * EAX[4:0] set to 0, which indicates there are no more
4628 		 * caches.
4629 		 *
4630 		 * Here, populate cpi_std[4] with the information returned by
4631 		 * function 4 when %ecx == 0, and do the rest in a later pass
4632 		 * when dynamic memory allocation becomes available.
4633 		 *
4634 		 * Note: we need to explicitly initialize %ecx here, since
4635 		 * function 4 may have been previously invoked.
4636 		 */
4637 		if (n == 4)
4638 			cp->cp_ecx = 0;
4639 
4640 		(void) __cpuid_insn(cp);
4641 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
4642 		switch (n) {
4643 		case 2:
4644 			/*
4645 			 * "the lower 8 bits of the %eax register
4646 			 * contain a value that identifies the number
4647 			 * of times the cpuid [instruction] has to be
4648 			 * executed to obtain a complete image of the
4649 			 * processor's caching systems."
4650 			 *
4651 			 * How *do* they make this stuff up?
4652 			 */
4653 			cpi->cpi_ncache = sizeof (*cp) *
4654 			    BITX(cp->cp_eax, 7, 0);
4655 			if (cpi->cpi_ncache == 0)
4656 				break;
4657 			cpi->cpi_ncache--;	/* skip count byte */
4658 
4659 			/*
4660 			 * Well, for now, rather than attempt to implement
4661 			 * this slightly dubious algorithm, we just look
4662 			 * at the first 15 ..
4663 			 */
4664 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
4665 				cpi->cpi_ncache = sizeof (*cp) - 1;
4666 
4667 			dp = cpi->cpi_cacheinfo;
4668 			if (BITX(cp->cp_eax, 31, 31) == 0) {
4669 				uint8_t *p = (void *)&cp->cp_eax;
4670 				for (i = 1; i < 4; i++)
4671 					if (p[i] != 0)
4672 						*dp++ = p[i];
4673 			}
4674 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
4675 				uint8_t *p = (void *)&cp->cp_ebx;
4676 				for (i = 0; i < 4; i++)
4677 					if (p[i] != 0)
4678 						*dp++ = p[i];
4679 			}
4680 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
4681 				uint8_t *p = (void *)&cp->cp_ecx;
4682 				for (i = 0; i < 4; i++)
4683 					if (p[i] != 0)
4684 						*dp++ = p[i];
4685 			}
4686 			if (BITX(cp->cp_edx, 31, 31) == 0) {
4687 				uint8_t *p = (void *)&cp->cp_edx;
4688 				for (i = 0; i < 4; i++)
4689 					if (p[i] != 0)
4690 						*dp++ = p[i];
4691 			}
4692 			break;
4693 
4694 		case 3:	/* Processor serial number, if PSN supported */
4695 			break;
4696 
4697 		case 4:	/* Deterministic cache parameters */
4698 			break;
4699 
4700 		case 5:	/* Monitor/Mwait parameters */
4701 		{
4702 			size_t mwait_size;
4703 
4704 			/*
4705 			 * check cpi_mwait.support which was set in
4706 			 * cpuid_pass_basic()
4707 			 */
4708 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
4709 				break;
4710 
4711 			/*
4712 			 * Protect ourself from insane mwait line size.
4713 			 * Workaround for incomplete hardware emulator(s).
4714 			 */
4715 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
4716 			if (mwait_size < sizeof (uint32_t) ||
4717 			    !ISP2(mwait_size)) {
4718 #if DEBUG
4719 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
4720 				    "size %ld", cpu->cpu_id, (long)mwait_size);
4721 #endif
4722 				break;
4723 			}
4724 
4725 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
4726 			cpi->cpi_mwait.mon_max = mwait_size;
4727 			if (MWAIT_EXTENSION(cpi)) {
4728 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
4729 				if (MWAIT_INT_ENABLE(cpi))
4730 					cpi->cpi_mwait.support |=
4731 					    MWAIT_ECX_INT_ENABLE;
4732 			}
4733 			break;
4734 		}
4735 		default:
4736 			break;
4737 		}
4738 	}
4739 
4740 	/*
4741 	 * XSAVE enumeration
4742 	 */
4743 	if (cpi->cpi_maxeax >= 0xD) {
4744 		struct cpuid_regs regs;
4745 		boolean_t cpuid_d_valid = B_TRUE;
4746 
4747 		cp = &regs;
4748 		cp->cp_eax = 0xD;
4749 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
4750 
4751 		(void) __cpuid_insn(cp);
4752 
4753 		/*
4754 		 * Sanity checks for debug
4755 		 */
4756 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
4757 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
4758 			cpuid_d_valid = B_FALSE;
4759 		}
4760 
4761 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
4762 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
4763 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
4764 
4765 		/*
4766 		 * If the hw supports AVX, get the size and offset in the save
4767 		 * area for the ymm state.
4768 		 */
4769 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
4770 			cp->cp_eax = 0xD;
4771 			cp->cp_ecx = 2;
4772 			cp->cp_edx = cp->cp_ebx = 0;
4773 
4774 			(void) __cpuid_insn(cp);
4775 
4776 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
4777 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
4778 				cpuid_d_valid = B_FALSE;
4779 			}
4780 
4781 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
4782 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
4783 		}
4784 
4785 		/*
4786 		 * If the hw supports MPX, get the size and offset in the
4787 		 * save area for BNDREGS and BNDCSR.
4788 		 */
4789 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_MPX) {
4790 			cp->cp_eax = 0xD;
4791 			cp->cp_ecx = 3;
4792 			cp->cp_edx = cp->cp_ebx = 0;
4793 
4794 			(void) __cpuid_insn(cp);
4795 
4796 			cpi->cpi_xsave.bndregs_size = cp->cp_eax;
4797 			cpi->cpi_xsave.bndregs_offset = cp->cp_ebx;
4798 
4799 			cp->cp_eax = 0xD;
4800 			cp->cp_ecx = 4;
4801 			cp->cp_edx = cp->cp_ebx = 0;
4802 
4803 			(void) __cpuid_insn(cp);
4804 
4805 			cpi->cpi_xsave.bndcsr_size = cp->cp_eax;
4806 			cpi->cpi_xsave.bndcsr_offset = cp->cp_ebx;
4807 		}
4808 
4809 		/*
4810 		 * If the hw supports AVX512, get the size and offset in the
4811 		 * save area for the opmask registers and zmm state.
4812 		 */
4813 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX512) {
4814 			cp->cp_eax = 0xD;
4815 			cp->cp_ecx = 5;
4816 			cp->cp_edx = cp->cp_ebx = 0;
4817 
4818 			(void) __cpuid_insn(cp);
4819 
4820 			cpi->cpi_xsave.opmask_size = cp->cp_eax;
4821 			cpi->cpi_xsave.opmask_offset = cp->cp_ebx;
4822 
4823 			cp->cp_eax = 0xD;
4824 			cp->cp_ecx = 6;
4825 			cp->cp_edx = cp->cp_ebx = 0;
4826 
4827 			(void) __cpuid_insn(cp);
4828 
4829 			cpi->cpi_xsave.zmmlo_size = cp->cp_eax;
4830 			cpi->cpi_xsave.zmmlo_offset = cp->cp_ebx;
4831 
4832 			cp->cp_eax = 0xD;
4833 			cp->cp_ecx = 7;
4834 			cp->cp_edx = cp->cp_ebx = 0;
4835 
4836 			(void) __cpuid_insn(cp);
4837 
4838 			cpi->cpi_xsave.zmmhi_size = cp->cp_eax;
4839 			cpi->cpi_xsave.zmmhi_offset = cp->cp_ebx;
4840 		}
4841 
4842 		if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
4843 			xsave_state_size = 0;
4844 		} else if (cpuid_d_valid) {
4845 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
4846 		} else {
4847 			/* Broken CPUID 0xD, probably in HVM */
4848 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
4849 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
4850 			    ", ymm_size = %d, ymm_offset = %d\n",
4851 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
4852 			    cpi->cpi_xsave.xsav_hw_features_high,
4853 			    (int)cpi->cpi_xsave.xsav_max_size,
4854 			    (int)cpi->cpi_xsave.ymm_size,
4855 			    (int)cpi->cpi_xsave.ymm_offset);
4856 
4857 			if (xsave_state_size != 0) {
4858 				/*
4859 				 * This must be a non-boot CPU. We cannot
4860 				 * continue, because boot cpu has already
4861 				 * enabled XSAVE.
4862 				 */
4863 				ASSERT(cpu->cpu_id != 0);
4864 				cmn_err(CE_PANIC, "cpu%d: we have already "
4865 				    "enabled XSAVE on boot cpu, cannot "
4866 				    "continue.", cpu->cpu_id);
4867 			} else {
4868 				/*
4869 				 * If we reached here on the boot CPU, it's also
4870 				 * almost certain that we'll reach here on the
4871 				 * non-boot CPUs. When we're here on a boot CPU
4872 				 * we should disable the feature, on a non-boot
4873 				 * CPU we need to confirm that we have.
4874 				 */
4875 				if (cpu->cpu_id == 0) {
4876 					remove_x86_feature(x86_featureset,
4877 					    X86FSET_XSAVE);
4878 					remove_x86_feature(x86_featureset,
4879 					    X86FSET_AVX);
4880 					remove_x86_feature(x86_featureset,
4881 					    X86FSET_F16C);
4882 					remove_x86_feature(x86_featureset,
4883 					    X86FSET_BMI1);
4884 					remove_x86_feature(x86_featureset,
4885 					    X86FSET_BMI2);
4886 					remove_x86_feature(x86_featureset,
4887 					    X86FSET_FMA);
4888 					remove_x86_feature(x86_featureset,
4889 					    X86FSET_AVX2);
4890 					remove_x86_feature(x86_featureset,
4891 					    X86FSET_MPX);
4892 					remove_x86_feature(x86_featureset,
4893 					    X86FSET_AVX512F);
4894 					remove_x86_feature(x86_featureset,
4895 					    X86FSET_AVX512DQ);
4896 					remove_x86_feature(x86_featureset,
4897 					    X86FSET_AVX512PF);
4898 					remove_x86_feature(x86_featureset,
4899 					    X86FSET_AVX512ER);
4900 					remove_x86_feature(x86_featureset,
4901 					    X86FSET_AVX512CD);
4902 					remove_x86_feature(x86_featureset,
4903 					    X86FSET_AVX512BW);
4904 					remove_x86_feature(x86_featureset,
4905 					    X86FSET_AVX512VL);
4906 					remove_x86_feature(x86_featureset,
4907 					    X86FSET_AVX512FMA);
4908 					remove_x86_feature(x86_featureset,
4909 					    X86FSET_AVX512VBMI);
4910 					remove_x86_feature(x86_featureset,
4911 					    X86FSET_AVX512VNNI);
4912 					remove_x86_feature(x86_featureset,
4913 					    X86FSET_AVX512VPOPCDQ);
4914 					remove_x86_feature(x86_featureset,
4915 					    X86FSET_AVX512NNIW);
4916 					remove_x86_feature(x86_featureset,
4917 					    X86FSET_AVX512FMAPS);
4918 					remove_x86_feature(x86_featureset,
4919 					    X86FSET_VAES);
4920 					remove_x86_feature(x86_featureset,
4921 					    X86FSET_VPCLMULQDQ);
4922 					remove_x86_feature(x86_featureset,
4923 					    X86FSET_GFNI);
4924 					remove_x86_feature(x86_featureset,
4925 					    X86FSET_AVX512_VP2INT);
4926 					remove_x86_feature(x86_featureset,
4927 					    X86FSET_AVX512_BITALG);
4928 					remove_x86_feature(x86_featureset,
4929 					    X86FSET_AVX512_VBMI2);
4930 					remove_x86_feature(x86_featureset,
4931 					    X86FSET_AVX512_BF16);
4932 
4933 					xsave_force_disable = B_TRUE;
4934 				} else {
4935 					VERIFY(is_x86_feature(x86_featureset,
4936 					    X86FSET_XSAVE) == B_FALSE);
4937 				}
4938 			}
4939 		}
4940 	}
4941 
4942 
4943 	if ((cpi->cpi_xmaxeax & CPUID_LEAF_EXT_0) == 0)
4944 		return;
4945 
4946 	if ((nmax = cpi->cpi_xmaxeax - CPUID_LEAF_EXT_0 + 1) > NMAX_CPI_EXTD)
4947 		nmax = NMAX_CPI_EXTD;
4948 	/*
4949 	 * Copy the extended properties, fixing them as we go. While we start at
4950 	 * 2 because we've already handled a few cases in the basic pass, the
4951 	 * rest we let ourselves just grab again (e.g. 0x8, 0x21).
4952 	 */
4953 	iptr = (void *)cpi->cpi_brandstr;
4954 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
4955 		cp->cp_eax = CPUID_LEAF_EXT_0 + n;
4956 		(void) __cpuid_insn(cp);
4957 		platform_cpuid_mangle(cpi->cpi_vendor, CPUID_LEAF_EXT_0 + n,
4958 		    cp);
4959 		switch (n) {
4960 		case 2:
4961 		case 3:
4962 		case 4:
4963 			/*
4964 			 * Extract the brand string
4965 			 */
4966 			*iptr++ = cp->cp_eax;
4967 			*iptr++ = cp->cp_ebx;
4968 			*iptr++ = cp->cp_ecx;
4969 			*iptr++ = cp->cp_edx;
4970 			break;
4971 		case 5:
4972 			switch (cpi->cpi_vendor) {
4973 			case X86_VENDOR_AMD:
4974 				/*
4975 				 * The Athlon and Duron were the first
4976 				 * parts to report the sizes of the
4977 				 * TLB for large pages. Before then,
4978 				 * we don't trust the data.
4979 				 */
4980 				if (cpi->cpi_family < 6 ||
4981 				    (cpi->cpi_family == 6 &&
4982 				    cpi->cpi_model < 1))
4983 					cp->cp_eax = 0;
4984 				break;
4985 			default:
4986 				break;
4987 			}
4988 			break;
4989 		case 6:
4990 			switch (cpi->cpi_vendor) {
4991 			case X86_VENDOR_AMD:
4992 				/*
4993 				 * The Athlon and Duron were the first
4994 				 * AMD parts with L2 TLB's.
4995 				 * Before then, don't trust the data.
4996 				 */
4997 				if (cpi->cpi_family < 6 ||
4998 				    (cpi->cpi_family == 6 &&
4999 				    cpi->cpi_model < 1))
5000 					cp->cp_eax = cp->cp_ebx = 0;
5001 				/*
5002 				 * AMD Duron rev A0 reports L2
5003 				 * cache size incorrectly as 1K
5004 				 * when it is really 64K
5005 				 */
5006 				if (cpi->cpi_family == 6 &&
5007 				    cpi->cpi_model == 3 &&
5008 				    cpi->cpi_step == 0) {
5009 					cp->cp_ecx &= 0xffff;
5010 					cp->cp_ecx |= 0x400000;
5011 				}
5012 				break;
5013 			case X86_VENDOR_Cyrix:	/* VIA C3 */
5014 				/*
5015 				 * VIA C3 processors are a bit messed
5016 				 * up w.r.t. encoding cache sizes in %ecx
5017 				 */
5018 				if (cpi->cpi_family != 6)
5019 					break;
5020 				/*
5021 				 * model 7 and 8 were incorrectly encoded
5022 				 *
5023 				 * xxx is model 8 really broken?
5024 				 */
5025 				if (cpi->cpi_model == 7 ||
5026 				    cpi->cpi_model == 8)
5027 					cp->cp_ecx =
5028 					    BITX(cp->cp_ecx, 31, 24) << 16 |
5029 					    BITX(cp->cp_ecx, 23, 16) << 12 |
5030 					    BITX(cp->cp_ecx, 15, 8) << 8 |
5031 					    BITX(cp->cp_ecx, 7, 0);
5032 				/*
5033 				 * model 9 stepping 1 has wrong associativity
5034 				 */
5035 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
5036 					cp->cp_ecx |= 8 << 12;
5037 				break;
5038 			case X86_VENDOR_Intel:
5039 				/*
5040 				 * Extended L2 Cache features function.
5041 				 * First appeared on Prescott.
5042 				 */
5043 			default:
5044 				break;
5045 			}
5046 			break;
5047 		default:
5048 			break;
5049 		}
5050 	}
5051 }
5052 
5053 static const char *
5054 intel_cpubrand(const struct cpuid_info *cpi)
5055 {
5056 	int i;
5057 
5058 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
5059 
5060 	switch (cpi->cpi_family) {
5061 	case 5:
5062 		return ("Intel Pentium(r)");
5063 	case 6:
5064 		switch (cpi->cpi_model) {
5065 			uint_t celeron, xeon;
5066 			const struct cpuid_regs *cp;
5067 		case 0:
5068 		case 1:
5069 		case 2:
5070 			return ("Intel Pentium(r) Pro");
5071 		case 3:
5072 		case 4:
5073 			return ("Intel Pentium(r) II");
5074 		case 6:
5075 			return ("Intel Celeron(r)");
5076 		case 5:
5077 		case 7:
5078 			celeron = xeon = 0;
5079 			cp = &cpi->cpi_std[2];	/* cache info */
5080 
5081 			for (i = 1; i < 4; i++) {
5082 				uint_t tmp;
5083 
5084 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
5085 				if (tmp == 0x40)
5086 					celeron++;
5087 				if (tmp >= 0x44 && tmp <= 0x45)
5088 					xeon++;
5089 			}
5090 
5091 			for (i = 0; i < 2; i++) {
5092 				uint_t tmp;
5093 
5094 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
5095 				if (tmp == 0x40)
5096 					celeron++;
5097 				else if (tmp >= 0x44 && tmp <= 0x45)
5098 					xeon++;
5099 			}
5100 
5101 			for (i = 0; i < 4; i++) {
5102 				uint_t tmp;
5103 
5104 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
5105 				if (tmp == 0x40)
5106 					celeron++;
5107 				else if (tmp >= 0x44 && tmp <= 0x45)
5108 					xeon++;
5109 			}
5110 
5111 			for (i = 0; i < 4; i++) {
5112 				uint_t tmp;
5113 
5114 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
5115 				if (tmp == 0x40)
5116 					celeron++;
5117 				else if (tmp >= 0x44 && tmp <= 0x45)
5118 					xeon++;
5119 			}
5120 
5121 			if (celeron)
5122 				return ("Intel Celeron(r)");
5123 			if (xeon)
5124 				return (cpi->cpi_model == 5 ?
5125 				    "Intel Pentium(r) II Xeon(tm)" :
5126 				    "Intel Pentium(r) III Xeon(tm)");
5127 			return (cpi->cpi_model == 5 ?
5128 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
5129 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
5130 		default:
5131 			break;
5132 		}
5133 	default:
5134 		break;
5135 	}
5136 
5137 	/* BrandID is present if the field is nonzero */
5138 	if (cpi->cpi_brandid != 0) {
5139 		static const struct {
5140 			uint_t bt_bid;
5141 			const char *bt_str;
5142 		} brand_tbl[] = {
5143 			{ 0x1,	"Intel(r) Celeron(r)" },
5144 			{ 0x2,	"Intel(r) Pentium(r) III" },
5145 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
5146 			{ 0x4,	"Intel(r) Pentium(r) III" },
5147 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
5148 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
5149 			{ 0x8,	"Intel(r) Pentium(r) 4" },
5150 			{ 0x9,	"Intel(r) Pentium(r) 4" },
5151 			{ 0xa,	"Intel(r) Celeron(r)" },
5152 			{ 0xb,	"Intel(r) Xeon(tm)" },
5153 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
5154 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
5155 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
5156 			{ 0x11, "Mobile Genuine Intel(r)" },
5157 			{ 0x12, "Intel(r) Celeron(r) M" },
5158 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
5159 			{ 0x14, "Intel(r) Celeron(r)" },
5160 			{ 0x15, "Mobile Genuine Intel(r)" },
5161 			{ 0x16,	"Intel(r) Pentium(r) M" },
5162 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
5163 		};
5164 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
5165 		uint_t sgn;
5166 
5167 		sgn = (cpi->cpi_family << 8) |
5168 		    (cpi->cpi_model << 4) | cpi->cpi_step;
5169 
5170 		for (i = 0; i < btblmax; i++)
5171 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
5172 				break;
5173 		if (i < btblmax) {
5174 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
5175 				return ("Intel(r) Celeron(r)");
5176 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
5177 				return ("Intel(r) Xeon(tm) MP");
5178 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
5179 				return ("Intel(r) Xeon(tm)");
5180 			return (brand_tbl[i].bt_str);
5181 		}
5182 	}
5183 
5184 	return (NULL);
5185 }
5186 
5187 static const char *
5188 amd_cpubrand(const struct cpuid_info *cpi)
5189 {
5190 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
5191 
5192 	switch (cpi->cpi_family) {
5193 	case 5:
5194 		switch (cpi->cpi_model) {
5195 		case 0:
5196 		case 1:
5197 		case 2:
5198 		case 3:
5199 		case 4:
5200 		case 5:
5201 			return ("AMD-K5(r)");
5202 		case 6:
5203 		case 7:
5204 			return ("AMD-K6(r)");
5205 		case 8:
5206 			return ("AMD-K6(r)-2");
5207 		case 9:
5208 			return ("AMD-K6(r)-III");
5209 		default:
5210 			return ("AMD (family 5)");
5211 		}
5212 	case 6:
5213 		switch (cpi->cpi_model) {
5214 		case 1:
5215 			return ("AMD-K7(tm)");
5216 		case 0:
5217 		case 2:
5218 		case 4:
5219 			return ("AMD Athlon(tm)");
5220 		case 3:
5221 		case 7:
5222 			return ("AMD Duron(tm)");
5223 		case 6:
5224 		case 8:
5225 		case 10:
5226 			/*
5227 			 * Use the L2 cache size to distinguish
5228 			 */
5229 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
5230 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
5231 		default:
5232 			return ("AMD (family 6)");
5233 		}
5234 	default:
5235 		break;
5236 	}
5237 
5238 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
5239 	    cpi->cpi_brandid != 0) {
5240 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
5241 		case 3:
5242 			return ("AMD Opteron(tm) UP 1xx");
5243 		case 4:
5244 			return ("AMD Opteron(tm) DP 2xx");
5245 		case 5:
5246 			return ("AMD Opteron(tm) MP 8xx");
5247 		default:
5248 			return ("AMD Opteron(tm)");
5249 		}
5250 	}
5251 
5252 	return (NULL);
5253 }
5254 
5255 static const char *
5256 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
5257 {
5258 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
5259 
5260 	switch (type) {
5261 	case X86_TYPE_CYRIX_6x86:
5262 		return ("Cyrix 6x86");
5263 	case X86_TYPE_CYRIX_6x86L:
5264 		return ("Cyrix 6x86L");
5265 	case X86_TYPE_CYRIX_6x86MX:
5266 		return ("Cyrix 6x86MX");
5267 	case X86_TYPE_CYRIX_GXm:
5268 		return ("Cyrix GXm");
5269 	case X86_TYPE_CYRIX_MediaGX:
5270 		return ("Cyrix MediaGX");
5271 	case X86_TYPE_CYRIX_MII:
5272 		return ("Cyrix M2");
5273 	case X86_TYPE_VIA_CYRIX_III:
5274 		return ("VIA Cyrix M3");
5275 	default:
5276 		/*
5277 		 * Have another wild guess ..
5278 		 */
5279 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
5280 			return ("Cyrix 5x86");
5281 		else if (cpi->cpi_family == 5) {
5282 			switch (cpi->cpi_model) {
5283 			case 2:
5284 				return ("Cyrix 6x86");	/* Cyrix M1 */
5285 			case 4:
5286 				return ("Cyrix MediaGX");
5287 			default:
5288 				break;
5289 			}
5290 		} else if (cpi->cpi_family == 6) {
5291 			switch (cpi->cpi_model) {
5292 			case 0:
5293 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
5294 			case 5:
5295 			case 6:
5296 			case 7:
5297 			case 8:
5298 			case 9:
5299 				return ("VIA C3");
5300 			default:
5301 				break;
5302 			}
5303 		}
5304 		break;
5305 	}
5306 	return (NULL);
5307 }
5308 
5309 /*
5310  * This only gets called in the case that the CPU extended
5311  * feature brand string (0x80000002, 0x80000003, 0x80000004)
5312  * aren't available, or contain null bytes for some reason.
5313  */
5314 static void
5315 fabricate_brandstr(struct cpuid_info *cpi)
5316 {
5317 	const char *brand = NULL;
5318 
5319 	switch (cpi->cpi_vendor) {
5320 	case X86_VENDOR_Intel:
5321 		brand = intel_cpubrand(cpi);
5322 		break;
5323 	case X86_VENDOR_AMD:
5324 		brand = amd_cpubrand(cpi);
5325 		break;
5326 	case X86_VENDOR_Cyrix:
5327 		brand = cyrix_cpubrand(cpi, x86_type);
5328 		break;
5329 	case X86_VENDOR_NexGen:
5330 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
5331 			brand = "NexGen Nx586";
5332 		break;
5333 	case X86_VENDOR_Centaur:
5334 		if (cpi->cpi_family == 5)
5335 			switch (cpi->cpi_model) {
5336 			case 4:
5337 				brand = "Centaur C6";
5338 				break;
5339 			case 8:
5340 				brand = "Centaur C2";
5341 				break;
5342 			case 9:
5343 				brand = "Centaur C3";
5344 				break;
5345 			default:
5346 				break;
5347 			}
5348 		break;
5349 	case X86_VENDOR_Rise:
5350 		if (cpi->cpi_family == 5 &&
5351 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
5352 			brand = "Rise mP6";
5353 		break;
5354 	case X86_VENDOR_SiS:
5355 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
5356 			brand = "SiS 55x";
5357 		break;
5358 	case X86_VENDOR_TM:
5359 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
5360 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
5361 		break;
5362 	case X86_VENDOR_NSC:
5363 	case X86_VENDOR_UMC:
5364 	default:
5365 		break;
5366 	}
5367 	if (brand) {
5368 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
5369 		return;
5370 	}
5371 
5372 	/*
5373 	 * If all else fails ...
5374 	 */
5375 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
5376 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
5377 	    cpi->cpi_model, cpi->cpi_step);
5378 }
5379 
5380 /*
5381  * This routine is called just after kernel memory allocation
5382  * becomes available on cpu0, and as part of mp_startup() on
5383  * the other cpus.
5384  *
5385  * Fixup the brand string, and collect any information from cpuid
5386  * that requires dynamically allocated storage to represent.
5387  */
5388 
5389 static void
5390 cpuid_pass_dynamic(cpu_t *cpu, void *_arg __unused)
5391 {
5392 	int	i, max, shft, level, size;
5393 	struct cpuid_regs regs;
5394 	struct cpuid_regs *cp;
5395 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
5396 
5397 	/*
5398 	 * Deterministic cache parameters
5399 	 *
5400 	 * Intel uses leaf 0x4 for this, while AMD uses leaf 0x8000001d. The
5401 	 * values that are present are currently defined to be the same. This
5402 	 * means we can use the same logic to parse it as long as we use the
5403 	 * appropriate leaf to get the data. If you're updating this, make sure
5404 	 * you're careful about which vendor supports which aspect.
5405 	 *
5406 	 * Take this opportunity to detect the number of threads sharing the
5407 	 * last level cache, and construct a corresponding cache id. The
5408 	 * respective cpuid_info members are initialized to the default case of
5409 	 * "no last level cache sharing".
5410 	 */
5411 	cpi->cpi_ncpu_shr_last_cache = 1;
5412 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
5413 
5414 	if ((cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) ||
5415 	    ((cpi->cpi_vendor == X86_VENDOR_AMD ||
5416 	    cpi->cpi_vendor == X86_VENDOR_HYGON) &&
5417 	    cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1d &&
5418 	    is_x86_feature(x86_featureset, X86FSET_TOPOEXT))) {
5419 		uint32_t leaf;
5420 
5421 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
5422 			leaf = 4;
5423 		} else {
5424 			leaf = CPUID_LEAF_EXT_1d;
5425 		}
5426 
5427 		/*
5428 		 * Find the # of elements (size) returned by the leaf and along
5429 		 * the way detect last level cache sharing details.
5430 		 */
5431 		bzero(&regs, sizeof (regs));
5432 		cp = &regs;
5433 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
5434 			cp->cp_eax = leaf;
5435 			cp->cp_ecx = i;
5436 
5437 			(void) __cpuid_insn(cp);
5438 
5439 			if (CPI_CACHE_TYPE(cp) == 0)
5440 				break;
5441 			level = CPI_CACHE_LVL(cp);
5442 			if (level > max) {
5443 				max = level;
5444 				cpi->cpi_ncpu_shr_last_cache =
5445 				    CPI_NTHR_SHR_CACHE(cp) + 1;
5446 			}
5447 		}
5448 		cpi->cpi_cache_leaf_size = size = i;
5449 
5450 		/*
5451 		 * Allocate the cpi_cache_leaves array. The first element
5452 		 * references the regs for the corresponding leaf with %ecx set
5453 		 * to 0. This was gathered in cpuid_pass_extended().
5454 		 */
5455 		if (size > 0) {
5456 			cpi->cpi_cache_leaves =
5457 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
5458 			if (cpi->cpi_vendor == X86_VENDOR_Intel) {
5459 				cpi->cpi_cache_leaves[0] = &cpi->cpi_std[4];
5460 			} else {
5461 				cpi->cpi_cache_leaves[0] = &cpi->cpi_extd[0x1d];
5462 			}
5463 
5464 			/*
5465 			 * Allocate storage to hold the additional regs
5466 			 * for the leaf, %ecx == 1 .. cpi_cache_leaf_size.
5467 			 *
5468 			 * The regs for the leaf, %ecx == 0 has already
5469 			 * been allocated as indicated above.
5470 			 */
5471 			for (i = 1; i < size; i++) {
5472 				cp = cpi->cpi_cache_leaves[i] =
5473 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
5474 				cp->cp_eax = leaf;
5475 				cp->cp_ecx = i;
5476 
5477 				(void) __cpuid_insn(cp);
5478 			}
5479 		}
5480 		/*
5481 		 * Determine the number of bits needed to represent
5482 		 * the number of CPUs sharing the last level cache.
5483 		 *
5484 		 * Shift off that number of bits from the APIC id to
5485 		 * derive the cache id.
5486 		 */
5487 		shft = 0;
5488 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
5489 			shft++;
5490 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
5491 	}
5492 
5493 	/*
5494 	 * Now fixup the brand string
5495 	 */
5496 	if ((cpi->cpi_xmaxeax & CPUID_LEAF_EXT_0) == 0) {
5497 		fabricate_brandstr(cpi);
5498 	} else {
5499 
5500 		/*
5501 		 * If we successfully extracted a brand string from the cpuid
5502 		 * instruction, clean it up by removing leading spaces and
5503 		 * similar junk.
5504 		 */
5505 		if (cpi->cpi_brandstr[0]) {
5506 			size_t maxlen = sizeof (cpi->cpi_brandstr);
5507 			char *src, *dst;
5508 
5509 			dst = src = (char *)cpi->cpi_brandstr;
5510 			src[maxlen - 1] = '\0';
5511 			/*
5512 			 * strip leading spaces
5513 			 */
5514 			while (*src == ' ')
5515 				src++;
5516 			/*
5517 			 * Remove any 'Genuine' or "Authentic" prefixes
5518 			 */
5519 			if (strncmp(src, "Genuine ", 8) == 0)
5520 				src += 8;
5521 			if (strncmp(src, "Authentic ", 10) == 0)
5522 				src += 10;
5523 
5524 			/*
5525 			 * Now do an in-place copy.
5526 			 * Map (R) to (r) and (TM) to (tm).
5527 			 * The era of teletypes is long gone, and there's
5528 			 * -really- no need to shout.
5529 			 */
5530 			while (*src != '\0') {
5531 				if (src[0] == '(') {
5532 					if (strncmp(src + 1, "R)", 2) == 0) {
5533 						(void) strncpy(dst, "(r)", 3);
5534 						src += 3;
5535 						dst += 3;
5536 						continue;
5537 					}
5538 					if (strncmp(src + 1, "TM)", 3) == 0) {
5539 						(void) strncpy(dst, "(tm)", 4);
5540 						src += 4;
5541 						dst += 4;
5542 						continue;
5543 					}
5544 				}
5545 				*dst++ = *src++;
5546 			}
5547 			*dst = '\0';
5548 
5549 			/*
5550 			 * Finally, remove any trailing spaces
5551 			 */
5552 			while (--dst > cpi->cpi_brandstr)
5553 				if (*dst == ' ')
5554 					*dst = '\0';
5555 				else
5556 					break;
5557 		} else
5558 			fabricate_brandstr(cpi);
5559 	}
5560 }
5561 
5562 typedef struct {
5563 	uint32_t avm_av;
5564 	uint32_t avm_feat;
5565 } av_feat_map_t;
5566 
5567 /*
5568  * These arrays are used to map features that we should add based on x86
5569  * features that are present. As a large number depend on kernel features,
5570  * rather than rechecking and clearing CPUID everywhere, we simply map these.
5571  * There is an array of these for each hwcap word. Some features aren't tracked
5572  * in the kernel x86 featureset and that's ok. They will not show up in here.
5573  */
5574 static const av_feat_map_t x86fset_to_av1[] = {
5575 	{ AV_386_CX8, X86FSET_CX8 },
5576 	{ AV_386_SEP, X86FSET_SEP },
5577 	{ AV_386_AMD_SYSC, X86FSET_ASYSC },
5578 	{ AV_386_CMOV, X86FSET_CMOV },
5579 	{ AV_386_FXSR, X86FSET_SSE },
5580 	{ AV_386_SSE, X86FSET_SSE },
5581 	{ AV_386_SSE2, X86FSET_SSE2 },
5582 	{ AV_386_SSE3, X86FSET_SSE3 },
5583 	{ AV_386_CX16, X86FSET_CX16 },
5584 	{ AV_386_TSCP, X86FSET_TSCP },
5585 	{ AV_386_AMD_SSE4A, X86FSET_SSE4A },
5586 	{ AV_386_SSSE3, X86FSET_SSSE3 },
5587 	{ AV_386_SSE4_1, X86FSET_SSE4_1 },
5588 	{ AV_386_SSE4_2, X86FSET_SSE4_2 },
5589 	{ AV_386_AES, X86FSET_AES },
5590 	{ AV_386_PCLMULQDQ, X86FSET_PCLMULQDQ },
5591 	{ AV_386_XSAVE, X86FSET_XSAVE },
5592 	{ AV_386_AVX, X86FSET_AVX },
5593 	{ AV_386_VMX, X86FSET_VMX },
5594 	{ AV_386_AMD_SVM, X86FSET_SVM }
5595 };
5596 
5597 static const av_feat_map_t x86fset_to_av2[] = {
5598 	{ AV_386_2_F16C, X86FSET_F16C },
5599 	{ AV_386_2_RDRAND, X86FSET_RDRAND },
5600 	{ AV_386_2_BMI1, X86FSET_BMI1 },
5601 	{ AV_386_2_BMI2, X86FSET_BMI2 },
5602 	{ AV_386_2_FMA, X86FSET_FMA },
5603 	{ AV_386_2_AVX2, X86FSET_AVX2 },
5604 	{ AV_386_2_ADX, X86FSET_ADX },
5605 	{ AV_386_2_RDSEED, X86FSET_RDSEED },
5606 	{ AV_386_2_AVX512F, X86FSET_AVX512F },
5607 	{ AV_386_2_AVX512DQ, X86FSET_AVX512DQ },
5608 	{ AV_386_2_AVX512IFMA, X86FSET_AVX512FMA },
5609 	{ AV_386_2_AVX512PF, X86FSET_AVX512PF },
5610 	{ AV_386_2_AVX512ER, X86FSET_AVX512ER },
5611 	{ AV_386_2_AVX512CD, X86FSET_AVX512CD },
5612 	{ AV_386_2_AVX512BW, X86FSET_AVX512BW },
5613 	{ AV_386_2_AVX512VL, X86FSET_AVX512VL },
5614 	{ AV_386_2_AVX512VBMI, X86FSET_AVX512VBMI },
5615 	{ AV_386_2_AVX512VPOPCDQ, X86FSET_AVX512VPOPCDQ },
5616 	{ AV_386_2_SHA, X86FSET_SHA },
5617 	{ AV_386_2_FSGSBASE, X86FSET_FSGSBASE },
5618 	{ AV_386_2_CLFLUSHOPT, X86FSET_CLFLUSHOPT },
5619 	{ AV_386_2_CLWB, X86FSET_CLWB },
5620 	{ AV_386_2_MONITORX, X86FSET_MONITORX },
5621 	{ AV_386_2_CLZERO, X86FSET_CLZERO },
5622 	{ AV_386_2_AVX512_VNNI, X86FSET_AVX512VNNI },
5623 	{ AV_386_2_VPCLMULQDQ, X86FSET_VPCLMULQDQ },
5624 	{ AV_386_2_VAES, X86FSET_VAES },
5625 	{ AV_386_2_GFNI, X86FSET_GFNI },
5626 	{ AV_386_2_AVX512_VP2INT, X86FSET_AVX512_VP2INT },
5627 	{ AV_386_2_AVX512_BITALG, X86FSET_AVX512_BITALG }
5628 };
5629 
5630 static const av_feat_map_t x86fset_to_av3[] = {
5631 	{ AV_386_3_AVX512_VBMI2, X86FSET_AVX512_VBMI2 },
5632 	{ AV_386_3_AVX512_BF16, X86FSET_AVX512_BF16 }
5633 };
5634 
5635 /*
5636  * This routine is called out of bind_hwcap() much later in the life
5637  * of the kernel (post_startup()).  The job of this routine is to resolve
5638  * the hardware feature support and kernel support for those features into
5639  * what we're actually going to tell applications via the aux vector.
5640  *
5641  * Most of the aux vector is derived from the x86_featureset array vector where
5642  * a given feature indicates that an aux vector should be plumbed through. This
5643  * allows the kernel to use one tracking mechanism for these based on whether or
5644  * not it has the required hardware support (most often xsave). Most newer
5645  * features are added there in case we need them in the kernel. Otherwise,
5646  * features are evaluated based on looking at the cpuid features that remain. If
5647  * you find yourself wanting to clear out cpuid features for some reason, they
5648  * should instead be driven by the feature set so we have a consistent view.
5649  */
5650 
5651 static void
5652 cpuid_pass_resolve(cpu_t *cpu, void *arg)
5653 {
5654 	uint_t *hwcap_out = (uint_t *)arg;
5655 	struct cpuid_info *cpi;
5656 	uint_t hwcap_flags = 0, hwcap_flags_2 = 0, hwcap_flags_3 = 0;
5657 
5658 	cpi = cpu->cpu_m.mcpu_cpi;
5659 
5660 	for (uint_t i = 0; i < ARRAY_SIZE(x86fset_to_av1); i++) {
5661 		if (is_x86_feature(x86_featureset,
5662 		    x86fset_to_av1[i].avm_feat)) {
5663 			hwcap_flags |= x86fset_to_av1[i].avm_av;
5664 		}
5665 	}
5666 
5667 	for (uint_t i = 0; i < ARRAY_SIZE(x86fset_to_av2); i++) {
5668 		if (is_x86_feature(x86_featureset,
5669 		    x86fset_to_av2[i].avm_feat)) {
5670 			hwcap_flags_2 |= x86fset_to_av2[i].avm_av;
5671 		}
5672 	}
5673 
5674 	for (uint_t i = 0; i < ARRAY_SIZE(x86fset_to_av3); i++) {
5675 		if (is_x86_feature(x86_featureset,
5676 		    x86fset_to_av3[i].avm_feat)) {
5677 			hwcap_flags_3 |= x86fset_to_av3[i].avm_av;
5678 		}
5679 	}
5680 
5681 	/*
5682 	 * From here on out we're working through features that don't have
5683 	 * corresponding kernel feature flags for various reasons that are
5684 	 * mostly just due to the historical implementation.
5685 	 */
5686 	if (cpi->cpi_maxeax >= 1) {
5687 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
5688 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
5689 
5690 		*edx = CPI_FEATURES_EDX(cpi);
5691 		*ecx = CPI_FEATURES_ECX(cpi);
5692 
5693 		/*
5694 		 * [no explicit support required beyond x87 fp context]
5695 		 */
5696 		if (!fpu_exists)
5697 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
5698 
5699 		/*
5700 		 * Now map the supported feature vector to things that we
5701 		 * think userland will care about.
5702 		 */
5703 		if (*ecx & CPUID_INTC_ECX_MOVBE)
5704 			hwcap_flags |= AV_386_MOVBE;
5705 
5706 		if (*ecx & CPUID_INTC_ECX_POPCNT)
5707 			hwcap_flags |= AV_386_POPCNT;
5708 		if (*edx & CPUID_INTC_EDX_FPU)
5709 			hwcap_flags |= AV_386_FPU;
5710 		if (*edx & CPUID_INTC_EDX_MMX)
5711 			hwcap_flags |= AV_386_MMX;
5712 		if (*edx & CPUID_INTC_EDX_TSC)
5713 			hwcap_flags |= AV_386_TSC;
5714 	}
5715 
5716 	/*
5717 	 * Check a few miscellaneous features.
5718 	 */
5719 	if (cpi->cpi_xmaxeax < 0x80000001)
5720 		goto resolve_done;
5721 
5722 	switch (cpi->cpi_vendor) {
5723 		uint32_t *edx, *ecx;
5724 
5725 	case X86_VENDOR_Intel:
5726 		/*
5727 		 * Seems like Intel duplicated what we necessary
5728 		 * here to make the initial crop of 64-bit OS's work.
5729 		 * Hopefully, those are the only "extended" bits
5730 		 * they'll add.
5731 		 */
5732 		/*FALLTHROUGH*/
5733 
5734 	case X86_VENDOR_AMD:
5735 	case X86_VENDOR_HYGON:
5736 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
5737 		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
5738 
5739 		*edx = CPI_FEATURES_XTD_EDX(cpi);
5740 		*ecx = CPI_FEATURES_XTD_ECX(cpi);
5741 
5742 		/*
5743 		 * [no explicit support required beyond
5744 		 * x87 fp context and exception handlers]
5745 		 */
5746 		if (!fpu_exists)
5747 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
5748 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
5749 
5750 		/*
5751 		 * Now map the supported feature vector to
5752 		 * things that we think userland will care about.
5753 		 */
5754 		if (*edx & CPUID_AMD_EDX_MMXamd)
5755 			hwcap_flags |= AV_386_AMD_MMX;
5756 		if (*edx & CPUID_AMD_EDX_3DNow)
5757 			hwcap_flags |= AV_386_AMD_3DNow;
5758 		if (*edx & CPUID_AMD_EDX_3DNowx)
5759 			hwcap_flags |= AV_386_AMD_3DNowx;
5760 
5761 		switch (cpi->cpi_vendor) {
5762 		case X86_VENDOR_AMD:
5763 		case X86_VENDOR_HYGON:
5764 			if (*ecx & CPUID_AMD_ECX_AHF64)
5765 				hwcap_flags |= AV_386_AHF;
5766 			if (*ecx & CPUID_AMD_ECX_LZCNT)
5767 				hwcap_flags |= AV_386_AMD_LZCNT;
5768 			break;
5769 
5770 		case X86_VENDOR_Intel:
5771 			if (*ecx & CPUID_AMD_ECX_LZCNT)
5772 				hwcap_flags |= AV_386_AMD_LZCNT;
5773 			/*
5774 			 * Aarrgh.
5775 			 * Intel uses a different bit in the same word.
5776 			 */
5777 			if (*ecx & CPUID_INTC_ECX_AHF64)
5778 				hwcap_flags |= AV_386_AHF;
5779 			break;
5780 		default:
5781 			break;
5782 		}
5783 		break;
5784 
5785 	default:
5786 		break;
5787 	}
5788 
5789 resolve_done:
5790 	if (hwcap_out != NULL) {
5791 		hwcap_out[0] = hwcap_flags;
5792 		hwcap_out[1] = hwcap_flags_2;
5793 		hwcap_out[2] = hwcap_flags_3;
5794 	}
5795 }
5796 
5797 
5798 /*
5799  * Simulate the cpuid instruction using the data we previously
5800  * captured about this CPU.  We try our best to return the truth
5801  * about the hardware, independently of kernel support.
5802  */
5803 uint32_t
5804 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
5805 {
5806 	struct cpuid_info *cpi;
5807 	struct cpuid_regs *xcp;
5808 
5809 	if (cpu == NULL)
5810 		cpu = CPU;
5811 	cpi = cpu->cpu_m.mcpu_cpi;
5812 
5813 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_DYNAMIC));
5814 
5815 	/*
5816 	 * CPUID data is cached in two separate places: cpi_std for standard
5817 	 * CPUID leaves , and cpi_extd for extended CPUID leaves.
5818 	 */
5819 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD) {
5820 		xcp = &cpi->cpi_std[cp->cp_eax];
5821 	} else if (cp->cp_eax >= CPUID_LEAF_EXT_0 &&
5822 	    cp->cp_eax <= cpi->cpi_xmaxeax &&
5823 	    cp->cp_eax < CPUID_LEAF_EXT_0 + NMAX_CPI_EXTD) {
5824 		xcp = &cpi->cpi_extd[cp->cp_eax - CPUID_LEAF_EXT_0];
5825 	} else {
5826 		/*
5827 		 * The caller is asking for data from an input parameter which
5828 		 * the kernel has not cached.  In this case we go fetch from
5829 		 * the hardware and return the data directly to the user.
5830 		 */
5831 		return (__cpuid_insn(cp));
5832 	}
5833 
5834 	cp->cp_eax = xcp->cp_eax;
5835 	cp->cp_ebx = xcp->cp_ebx;
5836 	cp->cp_ecx = xcp->cp_ecx;
5837 	cp->cp_edx = xcp->cp_edx;
5838 	return (cp->cp_eax);
5839 }
5840 
5841 boolean_t
5842 cpuid_checkpass(const cpu_t *const cpu, const cpuid_pass_t pass)
5843 {
5844 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
5845 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
5846 }
5847 
5848 int
5849 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
5850 {
5851 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_DYNAMIC));
5852 
5853 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
5854 }
5855 
5856 int
5857 cpuid_is_cmt(cpu_t *cpu)
5858 {
5859 	if (cpu == NULL)
5860 		cpu = CPU;
5861 
5862 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
5863 
5864 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
5865 }
5866 
5867 /*
5868  * AMD and Intel both implement the 64-bit variant of the syscall
5869  * instruction (syscallq), so if there's -any- support for syscall,
5870  * cpuid currently says "yes, we support this".
5871  *
5872  * However, Intel decided to -not- implement the 32-bit variant of the
5873  * syscall instruction, so we provide a predicate to allow our caller
5874  * to test that subtlety here.
5875  *
5876  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
5877  *	even in the case where the hardware would in fact support it.
5878  */
5879 /*ARGSUSED*/
5880 int
5881 cpuid_syscall32_insn(cpu_t *cpu)
5882 {
5883 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), CPUID_PASS_BASIC));
5884 
5885 #if !defined(__xpv)
5886 	if (cpu == NULL)
5887 		cpu = CPU;
5888 
5889 	/*CSTYLED*/
5890 	{
5891 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
5892 
5893 		if ((cpi->cpi_vendor == X86_VENDOR_AMD ||
5894 		    cpi->cpi_vendor == X86_VENDOR_HYGON) &&
5895 		    cpi->cpi_xmaxeax >= 0x80000001 &&
5896 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
5897 			return (1);
5898 	}
5899 #endif
5900 	return (0);
5901 }
5902 
5903 int
5904 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
5905 {
5906 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
5907 
5908 	static const char fmt[] =
5909 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
5910 	static const char fmt_ht[] =
5911 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
5912 
5913 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
5914 
5915 	if (cpuid_is_cmt(cpu))
5916 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
5917 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
5918 		    cpi->cpi_family, cpi->cpi_model,
5919 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
5920 	return (snprintf(s, n, fmt,
5921 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
5922 	    cpi->cpi_family, cpi->cpi_model,
5923 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
5924 }
5925 
5926 const char *
5927 cpuid_getvendorstr(cpu_t *cpu)
5928 {
5929 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
5930 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
5931 }
5932 
5933 uint_t
5934 cpuid_getvendor(cpu_t *cpu)
5935 {
5936 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
5937 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
5938 }
5939 
5940 uint_t
5941 cpuid_getfamily(cpu_t *cpu)
5942 {
5943 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
5944 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
5945 }
5946 
5947 uint_t
5948 cpuid_getmodel(cpu_t *cpu)
5949 {
5950 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
5951 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
5952 }
5953 
5954 uint_t
5955 cpuid_get_ncpu_per_chip(cpu_t *cpu)
5956 {
5957 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
5958 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
5959 }
5960 
5961 uint_t
5962 cpuid_get_ncore_per_chip(cpu_t *cpu)
5963 {
5964 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
5965 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
5966 }
5967 
5968 uint_t
5969 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
5970 {
5971 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_EXTENDED));
5972 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
5973 }
5974 
5975 id_t
5976 cpuid_get_last_lvl_cacheid(cpu_t *cpu)
5977 {
5978 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_EXTENDED));
5979 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
5980 }
5981 
5982 uint_t
5983 cpuid_getstep(cpu_t *cpu)
5984 {
5985 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
5986 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
5987 }
5988 
5989 uint_t
5990 cpuid_getsig(struct cpu *cpu)
5991 {
5992 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
5993 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
5994 }
5995 
5996 uint32_t
5997 cpuid_getchiprev(struct cpu *cpu)
5998 {
5999 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
6000 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
6001 }
6002 
6003 const char *
6004 cpuid_getchiprevstr(struct cpu *cpu)
6005 {
6006 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
6007 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
6008 }
6009 
6010 uint32_t
6011 cpuid_getsockettype(struct cpu *cpu)
6012 {
6013 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
6014 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
6015 }
6016 
6017 const char *
6018 cpuid_getsocketstr(cpu_t *cpu)
6019 {
6020 	static const char *socketstr = NULL;
6021 	struct cpuid_info *cpi;
6022 
6023 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_IDENT));
6024 	cpi = cpu->cpu_m.mcpu_cpi;
6025 
6026 	/* Assume that socket types are the same across the system */
6027 	if (socketstr == NULL)
6028 		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
6029 		    cpi->cpi_model, cpi->cpi_step);
6030 
6031 
6032 	return (socketstr);
6033 }
6034 
6035 x86_uarchrev_t
6036 cpuid_getuarchrev(cpu_t *cpu)
6037 {
6038 	return (cpu->cpu_m.mcpu_cpi->cpi_uarchrev);
6039 }
6040 
6041 int
6042 cpuid_get_chipid(cpu_t *cpu)
6043 {
6044 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6045 
6046 	if (cpuid_is_cmt(cpu))
6047 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
6048 	return (cpu->cpu_id);
6049 }
6050 
6051 id_t
6052 cpuid_get_coreid(cpu_t *cpu)
6053 {
6054 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6055 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
6056 }
6057 
6058 int
6059 cpuid_get_pkgcoreid(cpu_t *cpu)
6060 {
6061 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6062 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
6063 }
6064 
6065 int
6066 cpuid_get_clogid(cpu_t *cpu)
6067 {
6068 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6069 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
6070 }
6071 
6072 int
6073 cpuid_get_cacheid(cpu_t *cpu)
6074 {
6075 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6076 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
6077 }
6078 
6079 uint_t
6080 cpuid_get_procnodeid(cpu_t *cpu)
6081 {
6082 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6083 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
6084 }
6085 
6086 uint_t
6087 cpuid_get_procnodes_per_pkg(cpu_t *cpu)
6088 {
6089 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6090 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
6091 }
6092 
6093 uint_t
6094 cpuid_get_compunitid(cpu_t *cpu)
6095 {
6096 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6097 	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
6098 }
6099 
6100 uint_t
6101 cpuid_get_cores_per_compunit(cpu_t *cpu)
6102 {
6103 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6104 	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
6105 }
6106 
6107 uint32_t
6108 cpuid_get_apicid(cpu_t *cpu)
6109 {
6110 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6111 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
6112 		return (UINT32_MAX);
6113 	} else {
6114 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
6115 	}
6116 }
6117 
6118 void
6119 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
6120 {
6121 	struct cpuid_info *cpi;
6122 
6123 	if (cpu == NULL)
6124 		cpu = CPU;
6125 	cpi = cpu->cpu_m.mcpu_cpi;
6126 
6127 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6128 
6129 	if (pabits)
6130 		*pabits = cpi->cpi_pabits;
6131 	if (vabits)
6132 		*vabits = cpi->cpi_vabits;
6133 }
6134 
6135 size_t
6136 cpuid_get_xsave_size(void)
6137 {
6138 	return (MAX(cpuid_info0.cpi_xsave.xsav_max_size,
6139 	    sizeof (struct xsave_state)));
6140 }
6141 
6142 /*
6143  * Export information about known offsets to the kernel. We only care about
6144  * things we have actually enabled support for in %xcr0.
6145  */
6146 void
6147 cpuid_get_xsave_info(uint64_t bit, size_t *sizep, size_t *offp)
6148 {
6149 	size_t size, off;
6150 
6151 	VERIFY3U(bit & xsave_bv_all, !=, 0);
6152 
6153 	if (sizep == NULL)
6154 		sizep = &size;
6155 	if (offp == NULL)
6156 		offp = &off;
6157 
6158 	switch (bit) {
6159 	case XFEATURE_LEGACY_FP:
6160 	case XFEATURE_SSE:
6161 		*sizep = sizeof (struct fxsave_state);
6162 		*offp = 0;
6163 		break;
6164 	case XFEATURE_AVX:
6165 		*sizep = cpuid_info0.cpi_xsave.ymm_size;
6166 		*offp = cpuid_info0.cpi_xsave.ymm_offset;
6167 		break;
6168 	case XFEATURE_AVX512_OPMASK:
6169 		*sizep = cpuid_info0.cpi_xsave.opmask_size;
6170 		*offp = cpuid_info0.cpi_xsave.opmask_offset;
6171 		break;
6172 	case XFEATURE_AVX512_ZMM:
6173 		*sizep = cpuid_info0.cpi_xsave.zmmlo_size;
6174 		*offp = cpuid_info0.cpi_xsave.zmmlo_offset;
6175 		break;
6176 	case XFEATURE_AVX512_HI_ZMM:
6177 		*sizep = cpuid_info0.cpi_xsave.zmmhi_size;
6178 		*offp = cpuid_info0.cpi_xsave.zmmhi_offset;
6179 		break;
6180 	default:
6181 		panic("asked for unsupported xsave feature: 0x%lx", bit);
6182 	}
6183 }
6184 
6185 /*
6186  * Return true if the CPUs on this system require 'pointer clearing' for the
6187  * floating point error pointer exception handling. In the past, this has been
6188  * true for all AMD K7 & K8 CPUs, although newer AMD CPUs have been changed to
6189  * behave the same as Intel. This is checked via the CPUID_AMD_EBX_ERR_PTR_ZERO
6190  * feature bit and is reflected in the cpi_fp_amd_save member.
6191  */
6192 boolean_t
6193 cpuid_need_fp_excp_handling(void)
6194 {
6195 	return (cpuid_info0.cpi_vendor == X86_VENDOR_AMD &&
6196 	    cpuid_info0.cpi_fp_amd_save != 0);
6197 }
6198 
6199 /*
6200  * Returns the number of data TLB entries for a corresponding
6201  * pagesize.  If it can't be computed, or isn't known, the
6202  * routine returns zero.  If you ask about an architecturally
6203  * impossible pagesize, the routine will panic (so that the
6204  * hat implementor knows that things are inconsistent.)
6205  */
6206 uint_t
6207 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
6208 {
6209 	struct cpuid_info *cpi;
6210 	uint_t dtlb_nent = 0;
6211 
6212 	if (cpu == NULL)
6213 		cpu = CPU;
6214 	cpi = cpu->cpu_m.mcpu_cpi;
6215 
6216 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
6217 
6218 	/*
6219 	 * Check the L2 TLB info
6220 	 */
6221 	if (cpi->cpi_xmaxeax >= 0x80000006) {
6222 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
6223 
6224 		switch (pagesize) {
6225 
6226 		case 4 * 1024:
6227 			/*
6228 			 * All zero in the top 16 bits of the register
6229 			 * indicates a unified TLB. Size is in low 16 bits.
6230 			 */
6231 			if ((cp->cp_ebx & 0xffff0000) == 0)
6232 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
6233 			else
6234 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
6235 			break;
6236 
6237 		case 2 * 1024 * 1024:
6238 			if ((cp->cp_eax & 0xffff0000) == 0)
6239 				dtlb_nent = cp->cp_eax & 0x0000ffff;
6240 			else
6241 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
6242 			break;
6243 
6244 		default:
6245 			panic("unknown L2 pagesize");
6246 			/*NOTREACHED*/
6247 		}
6248 	}
6249 
6250 	if (dtlb_nent != 0)
6251 		return (dtlb_nent);
6252 
6253 	/*
6254 	 * No L2 TLB support for this size, try L1.
6255 	 */
6256 	if (cpi->cpi_xmaxeax >= 0x80000005) {
6257 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
6258 
6259 		switch (pagesize) {
6260 		case 4 * 1024:
6261 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
6262 			break;
6263 		case 2 * 1024 * 1024:
6264 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
6265 			break;
6266 		default:
6267 			panic("unknown L1 d-TLB pagesize");
6268 			/*NOTREACHED*/
6269 		}
6270 	}
6271 
6272 	return (dtlb_nent);
6273 }
6274 
6275 /*
6276  * Return 0 if the erratum is not present or not applicable, positive
6277  * if it is, and negative if the status of the erratum is unknown.
6278  *
6279  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
6280  * Processors" #25759, Rev 3.57, August 2005
6281  */
6282 int
6283 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
6284 {
6285 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
6286 	uint_t eax;
6287 
6288 	/*
6289 	 * Bail out if this CPU isn't an AMD CPU, or if it's
6290 	 * a legacy (32-bit) AMD CPU.
6291 	 */
6292 	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
6293 	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
6294 	    cpi->cpi_family == 6) {
6295 		return (0);
6296 	}
6297 
6298 	eax = cpi->cpi_std[1].cp_eax;
6299 
6300 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
6301 #define	SH_B3(eax)	(eax == 0xf51)
6302 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
6303 
6304 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
6305 
6306 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
6307 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
6308 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
6309 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
6310 
6311 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
6312 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
6313 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
6314 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
6315 
6316 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
6317 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
6318 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
6319 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
6320 #define	BH_E4(eax)	(eax == 0x20fb1)
6321 #define	SH_E5(eax)	(eax == 0x20f42)
6322 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
6323 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
6324 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
6325 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
6326 			    DH_E6(eax) || JH_E6(eax))
6327 
6328 #define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
6329 #define	DR_B0(eax)	(eax == 0x100f20)
6330 #define	DR_B1(eax)	(eax == 0x100f21)
6331 #define	DR_BA(eax)	(eax == 0x100f2a)
6332 #define	DR_B2(eax)	(eax == 0x100f22)
6333 #define	DR_B3(eax)	(eax == 0x100f23)
6334 #define	RB_C0(eax)	(eax == 0x100f40)
6335 
6336 	switch (erratum) {
6337 	case 1:
6338 		return (cpi->cpi_family < 0x10);
6339 	case 51:	/* what does the asterisk mean? */
6340 		return (B(eax) || SH_C0(eax) || CG(eax));
6341 	case 52:
6342 		return (B(eax));
6343 	case 57:
6344 		return (cpi->cpi_family <= 0x11);
6345 	case 58:
6346 		return (B(eax));
6347 	case 60:
6348 		return (cpi->cpi_family <= 0x11);
6349 	case 61:
6350 	case 62:
6351 	case 63:
6352 	case 64:
6353 	case 65:
6354 	case 66:
6355 	case 68:
6356 	case 69:
6357 	case 70:
6358 	case 71:
6359 		return (B(eax));
6360 	case 72:
6361 		return (SH_B0(eax));
6362 	case 74:
6363 		return (B(eax));
6364 	case 75:
6365 		return (cpi->cpi_family < 0x10);
6366 	case 76:
6367 		return (B(eax));
6368 	case 77:
6369 		return (cpi->cpi_family <= 0x11);
6370 	case 78:
6371 		return (B(eax) || SH_C0(eax));
6372 	case 79:
6373 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
6374 	case 80:
6375 	case 81:
6376 	case 82:
6377 		return (B(eax));
6378 	case 83:
6379 		return (B(eax) || SH_C0(eax) || CG(eax));
6380 	case 85:
6381 		return (cpi->cpi_family < 0x10);
6382 	case 86:
6383 		return (SH_C0(eax) || CG(eax));
6384 	case 88:
6385 		return (B(eax) || SH_C0(eax));
6386 	case 89:
6387 		return (cpi->cpi_family < 0x10);
6388 	case 90:
6389 		return (B(eax) || SH_C0(eax) || CG(eax));
6390 	case 91:
6391 	case 92:
6392 		return (B(eax) || SH_C0(eax));
6393 	case 93:
6394 		return (SH_C0(eax));
6395 	case 94:
6396 		return (B(eax) || SH_C0(eax) || CG(eax));
6397 	case 95:
6398 		return (B(eax) || SH_C0(eax));
6399 	case 96:
6400 		return (B(eax) || SH_C0(eax) || CG(eax));
6401 	case 97:
6402 	case 98:
6403 		return (SH_C0(eax) || CG(eax));
6404 	case 99:
6405 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
6406 	case 100:
6407 		return (B(eax) || SH_C0(eax));
6408 	case 101:
6409 	case 103:
6410 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
6411 	case 104:
6412 		return (SH_C0(eax) || CG(eax) || D0(eax));
6413 	case 105:
6414 	case 106:
6415 	case 107:
6416 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
6417 	case 108:
6418 		return (DH_CG(eax));
6419 	case 109:
6420 		return (SH_C0(eax) || CG(eax) || D0(eax));
6421 	case 110:
6422 		return (D0(eax) || EX(eax));
6423 	case 111:
6424 		return (CG(eax));
6425 	case 112:
6426 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
6427 	case 113:
6428 		return (eax == 0x20fc0);
6429 	case 114:
6430 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
6431 	case 115:
6432 		return (SH_E0(eax) || JH_E1(eax));
6433 	case 116:
6434 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
6435 	case 117:
6436 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
6437 	case 118:
6438 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
6439 		    JH_E6(eax));
6440 	case 121:
6441 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
6442 	case 122:
6443 		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
6444 	case 123:
6445 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
6446 	case 131:
6447 		return (cpi->cpi_family < 0x10);
6448 	case 6336786:
6449 
6450 		/*
6451 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
6452 		 * if this is a K8 family or newer processor. We're testing for
6453 		 * this 'erratum' to determine whether or not we have a constant
6454 		 * TSC.
6455 		 *
6456 		 * Our current fix for this is to disable the C1-Clock ramping.
6457 		 * However, this doesn't work on newer processor families nor
6458 		 * does it work when virtualized as those devices don't exist.
6459 		 */
6460 		if (cpi->cpi_family >= 0x12 || get_hwenv() != HW_NATIVE) {
6461 			return (0);
6462 		}
6463 
6464 		if (CPI_FAMILY(cpi) == 0xf) {
6465 			struct cpuid_regs regs;
6466 			regs.cp_eax = 0x80000007;
6467 			(void) __cpuid_insn(&regs);
6468 			return (!(regs.cp_edx & 0x100));
6469 		}
6470 		return (0);
6471 	case 147:
6472 		/*
6473 		 * This erratum (K8 #147) is not present on family 10 and newer.
6474 		 */
6475 		if (cpi->cpi_family >= 0x10) {
6476 			return (0);
6477 		}
6478 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
6479 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
6480 
6481 	case 6671130:
6482 		/*
6483 		 * check for processors (pre-Shanghai) that do not provide
6484 		 * optimal management of 1gb ptes in its tlb.
6485 		 */
6486 		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
6487 
6488 	case 298:
6489 		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
6490 		    DR_B2(eax) || RB_C0(eax));
6491 
6492 	case 721:
6493 		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
6494 
6495 	default:
6496 		return (-1);
6497 
6498 	}
6499 }
6500 
6501 /*
6502  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
6503  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
6504  */
6505 int
6506 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
6507 {
6508 	struct cpuid_info	*cpi;
6509 	uint_t			osvwid;
6510 	static int		osvwfeature = -1;
6511 	uint64_t		osvwlength;
6512 
6513 
6514 	cpi = cpu->cpu_m.mcpu_cpi;
6515 
6516 	/* confirm OSVW supported */
6517 	if (osvwfeature == -1) {
6518 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
6519 	} else {
6520 		/* assert that osvw feature setting is consistent on all cpus */
6521 		ASSERT(osvwfeature ==
6522 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
6523 	}
6524 	if (!osvwfeature)
6525 		return (-1);
6526 
6527 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
6528 
6529 	switch (erratum) {
6530 	case 298:	/* osvwid is 0 */
6531 		osvwid = 0;
6532 		if (osvwlength <= (uint64_t)osvwid) {
6533 			/* osvwid 0 is unknown */
6534 			return (-1);
6535 		}
6536 
6537 		/*
6538 		 * Check the OSVW STATUS MSR to determine the state
6539 		 * of the erratum where:
6540 		 *   0 - fixed by HW
6541 		 *   1 - BIOS has applied the workaround when BIOS
6542 		 *   workaround is available. (Or for other errata,
6543 		 *   OS workaround is required.)
6544 		 * For a value of 1, caller will confirm that the
6545 		 * erratum 298 workaround has indeed been applied by BIOS.
6546 		 *
6547 		 * A 1 may be set in cpus that have a HW fix
6548 		 * in a mixed cpu system. Regarding erratum 298:
6549 		 *   In a multiprocessor platform, the workaround above
6550 		 *   should be applied to all processors regardless of
6551 		 *   silicon revision when an affected processor is
6552 		 *   present.
6553 		 */
6554 
6555 		return (rdmsr(MSR_AMD_OSVW_STATUS +
6556 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
6557 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
6558 
6559 	default:
6560 		return (-1);
6561 	}
6562 }
6563 
6564 static const char assoc_str[] = "associativity";
6565 static const char line_str[] = "line-size";
6566 static const char size_str[] = "size";
6567 
6568 static void
6569 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
6570     uint32_t val)
6571 {
6572 	char buf[128];
6573 
6574 	/*
6575 	 * ndi_prop_update_int() is used because it is desirable for
6576 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
6577 	 */
6578 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
6579 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
6580 }
6581 
6582 /*
6583  * Intel-style cache/tlb description
6584  *
6585  * Standard cpuid level 2 gives a randomly ordered
6586  * selection of tags that index into a table that describes
6587  * cache and tlb properties.
6588  */
6589 
6590 static const char l1_icache_str[] = "l1-icache";
6591 static const char l1_dcache_str[] = "l1-dcache";
6592 static const char l2_cache_str[] = "l2-cache";
6593 static const char l3_cache_str[] = "l3-cache";
6594 static const char itlb4k_str[] = "itlb-4K";
6595 static const char dtlb4k_str[] = "dtlb-4K";
6596 static const char itlb2M_str[] = "itlb-2M";
6597 static const char itlb4M_str[] = "itlb-4M";
6598 static const char dtlb4M_str[] = "dtlb-4M";
6599 static const char dtlb24_str[] = "dtlb0-2M-4M";
6600 static const char itlb424_str[] = "itlb-4K-2M-4M";
6601 static const char itlb24_str[] = "itlb-2M-4M";
6602 static const char dtlb44_str[] = "dtlb-4K-4M";
6603 static const char sl1_dcache_str[] = "sectored-l1-dcache";
6604 static const char sl2_cache_str[] = "sectored-l2-cache";
6605 static const char itrace_str[] = "itrace-cache";
6606 static const char sl3_cache_str[] = "sectored-l3-cache";
6607 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
6608 
6609 static const struct cachetab {
6610 	uint8_t		ct_code;
6611 	uint8_t		ct_assoc;
6612 	uint16_t	ct_line_size;
6613 	size_t		ct_size;
6614 	const char	*ct_label;
6615 } intel_ctab[] = {
6616 	/*
6617 	 * maintain descending order!
6618 	 *
6619 	 * Codes ignored - Reason
6620 	 * ----------------------
6621 	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
6622 	 * f0H/f1H - Currently we do not interpret prefetch size by design
6623 	 */
6624 	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
6625 	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
6626 	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
6627 	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
6628 	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
6629 	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
6630 	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
6631 	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
6632 	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
6633 	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
6634 	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
6635 	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
6636 	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
6637 	{ 0xc0, 4, 0, 8, dtlb44_str },
6638 	{ 0xba, 4, 0, 64, dtlb4k_str },
6639 	{ 0xb4, 4, 0, 256, dtlb4k_str },
6640 	{ 0xb3, 4, 0, 128, dtlb4k_str },
6641 	{ 0xb2, 4, 0, 64, itlb4k_str },
6642 	{ 0xb0, 4, 0, 128, itlb4k_str },
6643 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
6644 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
6645 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
6646 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
6647 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
6648 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
6649 	{ 0x80, 8, 64, 512*1024, l2_cache_str},
6650 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
6651 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
6652 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
6653 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
6654 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
6655 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
6656 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
6657 	{ 0x73, 8, 0, 64*1024, itrace_str},
6658 	{ 0x72, 8, 0, 32*1024, itrace_str},
6659 	{ 0x71, 8, 0, 16*1024, itrace_str},
6660 	{ 0x70, 8, 0, 12*1024, itrace_str},
6661 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
6662 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
6663 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
6664 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
6665 	{ 0x5d, 0, 0, 256, dtlb44_str},
6666 	{ 0x5c, 0, 0, 128, dtlb44_str},
6667 	{ 0x5b, 0, 0, 64, dtlb44_str},
6668 	{ 0x5a, 4, 0, 32, dtlb24_str},
6669 	{ 0x59, 0, 0, 16, dtlb4k_str},
6670 	{ 0x57, 4, 0, 16, dtlb4k_str},
6671 	{ 0x56, 4, 0, 16, dtlb4M_str},
6672 	{ 0x55, 0, 0, 7, itlb24_str},
6673 	{ 0x52, 0, 0, 256, itlb424_str},
6674 	{ 0x51, 0, 0, 128, itlb424_str},
6675 	{ 0x50, 0, 0, 64, itlb424_str},
6676 	{ 0x4f, 0, 0, 32, itlb4k_str},
6677 	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
6678 	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
6679 	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
6680 	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
6681 	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
6682 	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
6683 	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
6684 	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
6685 	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
6686 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
6687 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
6688 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
6689 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
6690 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
6691 	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
6692 	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
6693 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
6694 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
6695 	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
6696 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
6697 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
6698 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
6699 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
6700 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
6701 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
6702 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
6703 	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
6704 	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
6705 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
6706 	{ 0x0b, 4, 0, 4, itlb4M_str},
6707 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
6708 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
6709 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
6710 	{ 0x05, 4, 0, 32, dtlb4M_str},
6711 	{ 0x04, 4, 0, 8, dtlb4M_str},
6712 	{ 0x03, 4, 0, 64, dtlb4k_str},
6713 	{ 0x02, 4, 0, 2, itlb4M_str},
6714 	{ 0x01, 4, 0, 32, itlb4k_str},
6715 	{ 0 }
6716 };
6717 
6718 static const struct cachetab cyrix_ctab[] = {
6719 	{ 0x70, 4, 0, 32, "tlb-4K" },
6720 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
6721 	{ 0 }
6722 };
6723 
6724 /*
6725  * Search a cache table for a matching entry
6726  */
6727 static const struct cachetab *
6728 find_cacheent(const struct cachetab *ct, uint_t code)
6729 {
6730 	if (code != 0) {
6731 		for (; ct->ct_code != 0; ct++)
6732 			if (ct->ct_code <= code)
6733 				break;
6734 		if (ct->ct_code == code)
6735 			return (ct);
6736 	}
6737 	return (NULL);
6738 }
6739 
6740 /*
6741  * Populate cachetab entry with L2 or L3 cache-information using
6742  * cpuid function 4. This function is called from intel_walk_cacheinfo()
6743  * when descriptor 0x49 is encountered. It returns 0 if no such cache
6744  * information is found.
6745  */
6746 static int
6747 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
6748 {
6749 	uint32_t level, i;
6750 	int ret = 0;
6751 
6752 	for (i = 0; i < cpi->cpi_cache_leaf_size; i++) {
6753 		level = CPI_CACHE_LVL(cpi->cpi_cache_leaves[i]);
6754 
6755 		if (level == 2 || level == 3) {
6756 			ct->ct_assoc =
6757 			    CPI_CACHE_WAYS(cpi->cpi_cache_leaves[i]) + 1;
6758 			ct->ct_line_size =
6759 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_cache_leaves[i]) + 1;
6760 			ct->ct_size = ct->ct_assoc *
6761 			    (CPI_CACHE_PARTS(cpi->cpi_cache_leaves[i]) + 1) *
6762 			    ct->ct_line_size *
6763 			    (cpi->cpi_cache_leaves[i]->cp_ecx + 1);
6764 
6765 			if (level == 2) {
6766 				ct->ct_label = l2_cache_str;
6767 			} else if (level == 3) {
6768 				ct->ct_label = l3_cache_str;
6769 			}
6770 			ret = 1;
6771 		}
6772 	}
6773 
6774 	return (ret);
6775 }
6776 
6777 /*
6778  * Walk the cacheinfo descriptor, applying 'func' to every valid element
6779  * The walk is terminated if the walker returns non-zero.
6780  */
6781 static void
6782 intel_walk_cacheinfo(struct cpuid_info *cpi,
6783     void *arg, int (*func)(void *, const struct cachetab *))
6784 {
6785 	const struct cachetab *ct;
6786 	struct cachetab des_49_ct, des_b1_ct;
6787 	uint8_t *dp;
6788 	int i;
6789 
6790 	if ((dp = cpi->cpi_cacheinfo) == NULL)
6791 		return;
6792 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
6793 		/*
6794 		 * For overloaded descriptor 0x49 we use cpuid function 4
6795 		 * if supported by the current processor, to create
6796 		 * cache information.
6797 		 * For overloaded descriptor 0xb1 we use X86_PAE flag
6798 		 * to disambiguate the cache information.
6799 		 */
6800 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
6801 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
6802 				ct = &des_49_ct;
6803 		} else if (*dp == 0xb1) {
6804 			des_b1_ct.ct_code = 0xb1;
6805 			des_b1_ct.ct_assoc = 4;
6806 			des_b1_ct.ct_line_size = 0;
6807 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
6808 				des_b1_ct.ct_size = 8;
6809 				des_b1_ct.ct_label = itlb2M_str;
6810 			} else {
6811 				des_b1_ct.ct_size = 4;
6812 				des_b1_ct.ct_label = itlb4M_str;
6813 			}
6814 			ct = &des_b1_ct;
6815 		} else {
6816 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
6817 				continue;
6818 			}
6819 		}
6820 
6821 		if (func(arg, ct) != 0) {
6822 			break;
6823 		}
6824 	}
6825 }
6826 
6827 /*
6828  * (Like the Intel one, except for Cyrix CPUs)
6829  */
6830 static void
6831 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
6832     void *arg, int (*func)(void *, const struct cachetab *))
6833 {
6834 	const struct cachetab *ct;
6835 	uint8_t *dp;
6836 	int i;
6837 
6838 	if ((dp = cpi->cpi_cacheinfo) == NULL)
6839 		return;
6840 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
6841 		/*
6842 		 * Search Cyrix-specific descriptor table first ..
6843 		 */
6844 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
6845 			if (func(arg, ct) != 0)
6846 				break;
6847 			continue;
6848 		}
6849 		/*
6850 		 * .. else fall back to the Intel one
6851 		 */
6852 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
6853 			if (func(arg, ct) != 0)
6854 				break;
6855 			continue;
6856 		}
6857 	}
6858 }
6859 
6860 /*
6861  * A cacheinfo walker that adds associativity, line-size, and size properties
6862  * to the devinfo node it is passed as an argument.
6863  */
6864 static int
6865 add_cacheent_props(void *arg, const struct cachetab *ct)
6866 {
6867 	dev_info_t *devi = arg;
6868 
6869 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
6870 	if (ct->ct_line_size != 0)
6871 		add_cache_prop(devi, ct->ct_label, line_str,
6872 		    ct->ct_line_size);
6873 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
6874 	return (0);
6875 }
6876 
6877 
6878 static const char fully_assoc[] = "fully-associative?";
6879 
6880 /*
6881  * AMD style cache/tlb description
6882  *
6883  * Extended functions 5 and 6 directly describe properties of
6884  * tlbs and various cache levels.
6885  */
6886 static void
6887 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
6888 {
6889 	switch (assoc) {
6890 	case 0:	/* reserved; ignore */
6891 		break;
6892 	default:
6893 		add_cache_prop(devi, label, assoc_str, assoc);
6894 		break;
6895 	case 0xff:
6896 		add_cache_prop(devi, label, fully_assoc, 1);
6897 		break;
6898 	}
6899 }
6900 
6901 static void
6902 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
6903 {
6904 	if (size == 0)
6905 		return;
6906 	add_cache_prop(devi, label, size_str, size);
6907 	add_amd_assoc(devi, label, assoc);
6908 }
6909 
6910 static void
6911 add_amd_cache(dev_info_t *devi, const char *label,
6912     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
6913 {
6914 	if (size == 0 || line_size == 0)
6915 		return;
6916 	add_amd_assoc(devi, label, assoc);
6917 	/*
6918 	 * Most AMD parts have a sectored cache. Multiple cache lines are
6919 	 * associated with each tag. A sector consists of all cache lines
6920 	 * associated with a tag. For example, the AMD K6-III has a sector
6921 	 * size of 2 cache lines per tag.
6922 	 */
6923 	if (lines_per_tag != 0)
6924 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
6925 	add_cache_prop(devi, label, line_str, line_size);
6926 	add_cache_prop(devi, label, size_str, size * 1024);
6927 }
6928 
6929 static void
6930 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
6931 {
6932 	switch (assoc) {
6933 	case 0:	/* off */
6934 		break;
6935 	case 1:
6936 	case 2:
6937 	case 4:
6938 		add_cache_prop(devi, label, assoc_str, assoc);
6939 		break;
6940 	case 6:
6941 		add_cache_prop(devi, label, assoc_str, 8);
6942 		break;
6943 	case 8:
6944 		add_cache_prop(devi, label, assoc_str, 16);
6945 		break;
6946 	case 0xf:
6947 		add_cache_prop(devi, label, fully_assoc, 1);
6948 		break;
6949 	default: /* reserved; ignore */
6950 		break;
6951 	}
6952 }
6953 
6954 static void
6955 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
6956 {
6957 	if (size == 0 || assoc == 0)
6958 		return;
6959 	add_amd_l2_assoc(devi, label, assoc);
6960 	add_cache_prop(devi, label, size_str, size);
6961 }
6962 
6963 static void
6964 add_amd_l2_cache(dev_info_t *devi, const char *label,
6965     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
6966 {
6967 	if (size == 0 || assoc == 0 || line_size == 0)
6968 		return;
6969 	add_amd_l2_assoc(devi, label, assoc);
6970 	if (lines_per_tag != 0)
6971 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
6972 	add_cache_prop(devi, label, line_str, line_size);
6973 	add_cache_prop(devi, label, size_str, size * 1024);
6974 }
6975 
6976 static void
6977 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
6978 {
6979 	struct cpuid_regs *cp;
6980 
6981 	if (cpi->cpi_xmaxeax < 0x80000005)
6982 		return;
6983 	cp = &cpi->cpi_extd[5];
6984 
6985 	/*
6986 	 * 4M/2M L1 TLB configuration
6987 	 *
6988 	 * We report the size for 2M pages because AMD uses two
6989 	 * TLB entries for one 4M page.
6990 	 */
6991 	add_amd_tlb(devi, "dtlb-2M",
6992 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
6993 	add_amd_tlb(devi, "itlb-2M",
6994 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
6995 
6996 	/*
6997 	 * 4K L1 TLB configuration
6998 	 */
6999 
7000 	switch (cpi->cpi_vendor) {
7001 		uint_t nentries;
7002 	case X86_VENDOR_TM:
7003 		if (cpi->cpi_family >= 5) {
7004 			/*
7005 			 * Crusoe processors have 256 TLB entries, but
7006 			 * cpuid data format constrains them to only
7007 			 * reporting 255 of them.
7008 			 */
7009 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
7010 				nentries = 256;
7011 			/*
7012 			 * Crusoe processors also have a unified TLB
7013 			 */
7014 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
7015 			    nentries);
7016 			break;
7017 		}
7018 		/*FALLTHROUGH*/
7019 	default:
7020 		add_amd_tlb(devi, itlb4k_str,
7021 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
7022 		add_amd_tlb(devi, dtlb4k_str,
7023 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
7024 		break;
7025 	}
7026 
7027 	/*
7028 	 * data L1 cache configuration
7029 	 */
7030 
7031 	add_amd_cache(devi, l1_dcache_str,
7032 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
7033 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
7034 
7035 	/*
7036 	 * code L1 cache configuration
7037 	 */
7038 
7039 	add_amd_cache(devi, l1_icache_str,
7040 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
7041 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
7042 
7043 	if (cpi->cpi_xmaxeax < 0x80000006)
7044 		return;
7045 	cp = &cpi->cpi_extd[6];
7046 
7047 	/* Check for a unified L2 TLB for large pages */
7048 
7049 	if (BITX(cp->cp_eax, 31, 16) == 0)
7050 		add_amd_l2_tlb(devi, "l2-tlb-2M",
7051 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
7052 	else {
7053 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
7054 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
7055 		add_amd_l2_tlb(devi, "l2-itlb-2M",
7056 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
7057 	}
7058 
7059 	/* Check for a unified L2 TLB for 4K pages */
7060 
7061 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
7062 		add_amd_l2_tlb(devi, "l2-tlb-4K",
7063 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
7064 	} else {
7065 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
7066 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
7067 		add_amd_l2_tlb(devi, "l2-itlb-4K",
7068 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
7069 	}
7070 
7071 	add_amd_l2_cache(devi, l2_cache_str,
7072 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
7073 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
7074 }
7075 
7076 /*
7077  * There are two basic ways that the x86 world describes it cache
7078  * and tlb architecture - Intel's way and AMD's way.
7079  *
7080  * Return which flavor of cache architecture we should use
7081  */
7082 static int
7083 x86_which_cacheinfo(struct cpuid_info *cpi)
7084 {
7085 	switch (cpi->cpi_vendor) {
7086 	case X86_VENDOR_Intel:
7087 		if (cpi->cpi_maxeax >= 2)
7088 			return (X86_VENDOR_Intel);
7089 		break;
7090 	case X86_VENDOR_AMD:
7091 		/*
7092 		 * The K5 model 1 was the first part from AMD that reported
7093 		 * cache sizes via extended cpuid functions.
7094 		 */
7095 		if (cpi->cpi_family > 5 ||
7096 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
7097 			return (X86_VENDOR_AMD);
7098 		break;
7099 	case X86_VENDOR_HYGON:
7100 		return (X86_VENDOR_AMD);
7101 	case X86_VENDOR_TM:
7102 		if (cpi->cpi_family >= 5)
7103 			return (X86_VENDOR_AMD);
7104 		/*FALLTHROUGH*/
7105 	default:
7106 		/*
7107 		 * If they have extended CPU data for 0x80000005
7108 		 * then we assume they have AMD-format cache
7109 		 * information.
7110 		 *
7111 		 * If not, and the vendor happens to be Cyrix,
7112 		 * then try our-Cyrix specific handler.
7113 		 *
7114 		 * If we're not Cyrix, then assume we're using Intel's
7115 		 * table-driven format instead.
7116 		 */
7117 		if (cpi->cpi_xmaxeax >= 0x80000005)
7118 			return (X86_VENDOR_AMD);
7119 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
7120 			return (X86_VENDOR_Cyrix);
7121 		else if (cpi->cpi_maxeax >= 2)
7122 			return (X86_VENDOR_Intel);
7123 		break;
7124 	}
7125 	return (-1);
7126 }
7127 
7128 void
7129 cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
7130     struct cpuid_info *cpi)
7131 {
7132 	dev_info_t *cpu_devi;
7133 	int create;
7134 
7135 	cpu_devi = (dev_info_t *)dip;
7136 
7137 	/* device_type */
7138 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
7139 	    "device_type", "cpu");
7140 
7141 	/* reg */
7142 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7143 	    "reg", cpu_id);
7144 
7145 	/* cpu-mhz, and clock-frequency */
7146 	if (cpu_freq > 0) {
7147 		long long mul;
7148 
7149 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7150 		    "cpu-mhz", cpu_freq);
7151 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
7152 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7153 			    "clock-frequency", (int)mul);
7154 	}
7155 
7156 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
7157 
7158 	/* vendor-id */
7159 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
7160 	    "vendor-id", cpi->cpi_vendorstr);
7161 
7162 	if (cpi->cpi_maxeax == 0) {
7163 		return;
7164 	}
7165 
7166 	/*
7167 	 * family, model, and step
7168 	 */
7169 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7170 	    "family", CPI_FAMILY(cpi));
7171 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7172 	    "cpu-model", CPI_MODEL(cpi));
7173 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7174 	    "stepping-id", CPI_STEP(cpi));
7175 
7176 	/* type */
7177 	switch (cpi->cpi_vendor) {
7178 	case X86_VENDOR_Intel:
7179 		create = 1;
7180 		break;
7181 	default:
7182 		create = 0;
7183 		break;
7184 	}
7185 	if (create)
7186 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7187 		    "type", CPI_TYPE(cpi));
7188 
7189 	/* ext-family */
7190 	switch (cpi->cpi_vendor) {
7191 	case X86_VENDOR_Intel:
7192 	case X86_VENDOR_AMD:
7193 		create = cpi->cpi_family >= 0xf;
7194 		break;
7195 	case X86_VENDOR_HYGON:
7196 		create = 1;
7197 		break;
7198 	default:
7199 		create = 0;
7200 		break;
7201 	}
7202 	if (create)
7203 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7204 		    "ext-family", CPI_FAMILY_XTD(cpi));
7205 
7206 	/* ext-model */
7207 	switch (cpi->cpi_vendor) {
7208 	case X86_VENDOR_Intel:
7209 		create = IS_EXTENDED_MODEL_INTEL(cpi);
7210 		break;
7211 	case X86_VENDOR_AMD:
7212 		create = CPI_FAMILY(cpi) == 0xf;
7213 		break;
7214 	case X86_VENDOR_HYGON:
7215 		create = 1;
7216 		break;
7217 	default:
7218 		create = 0;
7219 		break;
7220 	}
7221 	if (create)
7222 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7223 		    "ext-model", CPI_MODEL_XTD(cpi));
7224 
7225 	/* generation */
7226 	switch (cpi->cpi_vendor) {
7227 	case X86_VENDOR_AMD:
7228 	case X86_VENDOR_HYGON:
7229 		/*
7230 		 * AMD K5 model 1 was the first part to support this
7231 		 */
7232 		create = cpi->cpi_xmaxeax >= 0x80000001;
7233 		break;
7234 	default:
7235 		create = 0;
7236 		break;
7237 	}
7238 	if (create)
7239 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7240 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
7241 
7242 	/* brand-id */
7243 	switch (cpi->cpi_vendor) {
7244 	case X86_VENDOR_Intel:
7245 		/*
7246 		 * brand id first appeared on Pentium III Xeon model 8,
7247 		 * and Celeron model 8 processors and Opteron
7248 		 */
7249 		create = cpi->cpi_family > 6 ||
7250 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
7251 		break;
7252 	case X86_VENDOR_AMD:
7253 		create = cpi->cpi_family >= 0xf;
7254 		break;
7255 	case X86_VENDOR_HYGON:
7256 		create = 1;
7257 		break;
7258 	default:
7259 		create = 0;
7260 		break;
7261 	}
7262 	if (create && cpi->cpi_brandid != 0) {
7263 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7264 		    "brand-id", cpi->cpi_brandid);
7265 	}
7266 
7267 	/* chunks, and apic-id */
7268 	switch (cpi->cpi_vendor) {
7269 		/*
7270 		 * first available on Pentium IV and Opteron (K8)
7271 		 */
7272 	case X86_VENDOR_Intel:
7273 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
7274 		break;
7275 	case X86_VENDOR_AMD:
7276 		create = cpi->cpi_family >= 0xf;
7277 		break;
7278 	case X86_VENDOR_HYGON:
7279 		create = 1;
7280 		break;
7281 	default:
7282 		create = 0;
7283 		break;
7284 	}
7285 	if (create) {
7286 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7287 		    "chunks", CPI_CHUNKS(cpi));
7288 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7289 		    "apic-id", cpi->cpi_apicid);
7290 		if (cpi->cpi_chipid >= 0) {
7291 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7292 			    "chip#", cpi->cpi_chipid);
7293 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7294 			    "clog#", cpi->cpi_clogid);
7295 		}
7296 	}
7297 
7298 	/* cpuid-features */
7299 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7300 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
7301 
7302 
7303 	/* cpuid-features-ecx */
7304 	switch (cpi->cpi_vendor) {
7305 	case X86_VENDOR_Intel:
7306 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
7307 		break;
7308 	case X86_VENDOR_AMD:
7309 		create = cpi->cpi_family >= 0xf;
7310 		break;
7311 	case X86_VENDOR_HYGON:
7312 		create = 1;
7313 		break;
7314 	default:
7315 		create = 0;
7316 		break;
7317 	}
7318 	if (create)
7319 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7320 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
7321 
7322 	/* ext-cpuid-features */
7323 	switch (cpi->cpi_vendor) {
7324 	case X86_VENDOR_Intel:
7325 	case X86_VENDOR_AMD:
7326 	case X86_VENDOR_HYGON:
7327 	case X86_VENDOR_Cyrix:
7328 	case X86_VENDOR_TM:
7329 	case X86_VENDOR_Centaur:
7330 		create = cpi->cpi_xmaxeax >= 0x80000001;
7331 		break;
7332 	default:
7333 		create = 0;
7334 		break;
7335 	}
7336 	if (create) {
7337 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7338 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
7339 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
7340 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
7341 	}
7342 
7343 	/*
7344 	 * Brand String first appeared in Intel Pentium IV, AMD K5
7345 	 * model 1, and Cyrix GXm.  On earlier models we try and
7346 	 * simulate something similar .. so this string should always
7347 	 * same -something- about the processor, however lame.
7348 	 */
7349 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
7350 	    "brand-string", cpi->cpi_brandstr);
7351 
7352 	/*
7353 	 * Finally, cache and tlb information
7354 	 */
7355 	switch (x86_which_cacheinfo(cpi)) {
7356 	case X86_VENDOR_Intel:
7357 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
7358 		break;
7359 	case X86_VENDOR_Cyrix:
7360 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
7361 		break;
7362 	case X86_VENDOR_AMD:
7363 		amd_cache_info(cpi, cpu_devi);
7364 		break;
7365 	default:
7366 		break;
7367 	}
7368 }
7369 
7370 struct l2info {
7371 	int *l2i_csz;
7372 	int *l2i_lsz;
7373 	int *l2i_assoc;
7374 	int l2i_ret;
7375 };
7376 
7377 /*
7378  * A cacheinfo walker that fetches the size, line-size and associativity
7379  * of the L2 cache
7380  */
7381 static int
7382 intel_l2cinfo(void *arg, const struct cachetab *ct)
7383 {
7384 	struct l2info *l2i = arg;
7385 	int *ip;
7386 
7387 	if (ct->ct_label != l2_cache_str &&
7388 	    ct->ct_label != sl2_cache_str)
7389 		return (0);	/* not an L2 -- keep walking */
7390 
7391 	if ((ip = l2i->l2i_csz) != NULL)
7392 		*ip = ct->ct_size;
7393 	if ((ip = l2i->l2i_lsz) != NULL)
7394 		*ip = ct->ct_line_size;
7395 	if ((ip = l2i->l2i_assoc) != NULL)
7396 		*ip = ct->ct_assoc;
7397 	l2i->l2i_ret = ct->ct_size;
7398 	return (1);		/* was an L2 -- terminate walk */
7399 }
7400 
7401 /*
7402  * AMD L2/L3 Cache and TLB Associativity Field Definition:
7403  *
7404  *	Unlike the associativity for the L1 cache and tlb where the 8 bit
7405  *	value is the associativity, the associativity for the L2 cache and
7406  *	tlb is encoded in the following table. The 4 bit L2 value serves as
7407  *	an index into the amd_afd[] array to determine the associativity.
7408  *	-1 is undefined. 0 is fully associative.
7409  */
7410 
7411 static int amd_afd[] =
7412 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
7413 
7414 static void
7415 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
7416 {
7417 	struct cpuid_regs *cp;
7418 	uint_t size, assoc;
7419 	int i;
7420 	int *ip;
7421 
7422 	if (cpi->cpi_xmaxeax < 0x80000006)
7423 		return;
7424 	cp = &cpi->cpi_extd[6];
7425 
7426 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
7427 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
7428 		uint_t cachesz = size * 1024;
7429 		assoc = amd_afd[i];
7430 
7431 		ASSERT(assoc != -1);
7432 
7433 		if ((ip = l2i->l2i_csz) != NULL)
7434 			*ip = cachesz;
7435 		if ((ip = l2i->l2i_lsz) != NULL)
7436 			*ip = BITX(cp->cp_ecx, 7, 0);
7437 		if ((ip = l2i->l2i_assoc) != NULL)
7438 			*ip = assoc;
7439 		l2i->l2i_ret = cachesz;
7440 	}
7441 }
7442 
7443 int
7444 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
7445 {
7446 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
7447 	struct l2info __l2info, *l2i = &__l2info;
7448 
7449 	l2i->l2i_csz = csz;
7450 	l2i->l2i_lsz = lsz;
7451 	l2i->l2i_assoc = assoc;
7452 	l2i->l2i_ret = -1;
7453 
7454 	switch (x86_which_cacheinfo(cpi)) {
7455 	case X86_VENDOR_Intel:
7456 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
7457 		break;
7458 	case X86_VENDOR_Cyrix:
7459 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
7460 		break;
7461 	case X86_VENDOR_AMD:
7462 		amd_l2cacheinfo(cpi, l2i);
7463 		break;
7464 	default:
7465 		break;
7466 	}
7467 	return (l2i->l2i_ret);
7468 }
7469 
7470 #if !defined(__xpv)
7471 
7472 uint32_t *
7473 cpuid_mwait_alloc(cpu_t *cpu)
7474 {
7475 	uint32_t	*ret;
7476 	size_t		mwait_size;
7477 
7478 	ASSERT(cpuid_checkpass(CPU, CPUID_PASS_EXTENDED));
7479 
7480 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
7481 	if (mwait_size == 0)
7482 		return (NULL);
7483 
7484 	/*
7485 	 * kmem_alloc() returns cache line size aligned data for mwait_size
7486 	 * allocations.  mwait_size is currently cache line sized.  Neither
7487 	 * of these implementation details are guarantied to be true in the
7488 	 * future.
7489 	 *
7490 	 * First try allocating mwait_size as kmem_alloc() currently returns
7491 	 * correctly aligned memory.  If kmem_alloc() does not return
7492 	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
7493 	 *
7494 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
7495 	 * decide to free this memory.
7496 	 */
7497 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
7498 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
7499 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
7500 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
7501 		*ret = MWAIT_RUNNING;
7502 		return (ret);
7503 	} else {
7504 		kmem_free(ret, mwait_size);
7505 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
7506 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
7507 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
7508 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
7509 		*ret = MWAIT_RUNNING;
7510 		return (ret);
7511 	}
7512 }
7513 
7514 void
7515 cpuid_mwait_free(cpu_t *cpu)
7516 {
7517 	if (cpu->cpu_m.mcpu_cpi == NULL) {
7518 		return;
7519 	}
7520 
7521 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
7522 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
7523 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
7524 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
7525 	}
7526 
7527 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
7528 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
7529 }
7530 
7531 void
7532 patch_tsc_read(int flag)
7533 {
7534 	size_t cnt;
7535 
7536 	switch (flag) {
7537 	case TSC_NONE:
7538 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
7539 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
7540 		break;
7541 	case TSC_RDTSC_LFENCE:
7542 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
7543 		(void) memcpy((void *)tsc_read,
7544 		    (void *)&_tsc_lfence_start, cnt);
7545 		break;
7546 	case TSC_TSCP:
7547 		cnt = &_tscp_end - &_tscp_start;
7548 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
7549 		break;
7550 	default:
7551 		/* Bail for unexpected TSC types. (TSC_NONE covers 0) */
7552 		cmn_err(CE_PANIC, "Unrecogized TSC type: %d", flag);
7553 		break;
7554 	}
7555 	tsc_type = flag;
7556 }
7557 
7558 int
7559 cpuid_deep_cstates_supported(void)
7560 {
7561 	struct cpuid_info *cpi;
7562 	struct cpuid_regs regs;
7563 
7564 	ASSERT(cpuid_checkpass(CPU, CPUID_PASS_BASIC));
7565 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
7566 
7567 	cpi = CPU->cpu_m.mcpu_cpi;
7568 
7569 	switch (cpi->cpi_vendor) {
7570 	case X86_VENDOR_Intel:
7571 		if (cpi->cpi_xmaxeax < 0x80000007)
7572 			return (0);
7573 
7574 		/*
7575 		 * Does TSC run at a constant rate in all C-states?
7576 		 */
7577 		regs.cp_eax = 0x80000007;
7578 		(void) __cpuid_insn(&regs);
7579 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
7580 
7581 	default:
7582 		return (0);
7583 	}
7584 }
7585 
7586 #endif	/* !__xpv */
7587 
7588 void
7589 post_startup_cpu_fixups(void)
7590 {
7591 #ifndef __xpv
7592 	/*
7593 	 * Some AMD processors support C1E state. Entering this state will
7594 	 * cause the local APIC timer to stop, which we can't deal with at
7595 	 * this time.
7596 	 */
7597 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
7598 		on_trap_data_t otd;
7599 		uint64_t reg;
7600 
7601 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
7602 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
7603 			/* Disable C1E state if it is enabled by BIOS */
7604 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
7605 			    AMD_ACTONCMPHALT_MASK) {
7606 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
7607 				    AMD_ACTONCMPHALT_SHIFT);
7608 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
7609 			}
7610 		}
7611 		no_trap();
7612 	}
7613 #endif	/* !__xpv */
7614 }
7615 
7616 void
7617 enable_pcid(void)
7618 {
7619 	if (x86_use_pcid == -1)
7620 		x86_use_pcid = is_x86_feature(x86_featureset, X86FSET_PCID);
7621 
7622 	if (x86_use_invpcid == -1) {
7623 		x86_use_invpcid = is_x86_feature(x86_featureset,
7624 		    X86FSET_INVPCID);
7625 	}
7626 
7627 	if (!x86_use_pcid)
7628 		return;
7629 
7630 	/*
7631 	 * Intel say that on setting PCIDE, it immediately starts using the PCID
7632 	 * bits; better make sure there's nothing there.
7633 	 */
7634 	ASSERT((getcr3() & MMU_PAGEOFFSET) == PCID_NONE);
7635 
7636 	setcr4(getcr4() | CR4_PCIDE);
7637 }
7638 
7639 /*
7640  * Setup necessary registers to enable XSAVE feature on this processor.
7641  * This function needs to be called early enough, so that no xsave/xrstor
7642  * ops will execute on the processor before the MSRs are properly set up.
7643  *
7644  * Current implementation has the following assumption:
7645  * - cpuid_pass_basic() is done, so that X86 features are known.
7646  * - fpu_probe() is done, so that fp_save_mech is chosen.
7647  */
7648 void
7649 xsave_setup_msr(cpu_t *cpu)
7650 {
7651 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));
7652 	ASSERT(fp_save_mech == FP_XSAVE);
7653 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
7654 
7655 	/* Enable OSXSAVE in CR4. */
7656 	setcr4(getcr4() | CR4_OSXSAVE);
7657 	/*
7658 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
7659 	 * correct value.
7660 	 */
7661 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
7662 	setup_xfem();
7663 }
7664 
7665 /*
7666  * Starting with the Westmere processor the local
7667  * APIC timer will continue running in all C-states,
7668  * including the deepest C-states.
7669  */
7670 int
7671 cpuid_arat_supported(void)
7672 {
7673 	struct cpuid_info *cpi;
7674 	struct cpuid_regs regs;
7675 
7676 	ASSERT(cpuid_checkpass(CPU, CPUID_PASS_BASIC));
7677 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
7678 
7679 	cpi = CPU->cpu_m.mcpu_cpi;
7680 
7681 	switch (cpi->cpi_vendor) {
7682 	case X86_VENDOR_Intel:
7683 		/*
7684 		 * Always-running Local APIC Timer is
7685 		 * indicated by CPUID.6.EAX[2].
7686 		 */
7687 		if (cpi->cpi_maxeax >= 6) {
7688 			regs.cp_eax = 6;
7689 			(void) cpuid_insn(NULL, &regs);
7690 			return (regs.cp_eax & CPUID_INTC_EAX_ARAT);
7691 		} else {
7692 			return (0);
7693 		}
7694 	default:
7695 		return (0);
7696 	}
7697 }
7698 
7699 /*
7700  * Check support for Intel ENERGY_PERF_BIAS feature
7701  */
7702 int
7703 cpuid_iepb_supported(struct cpu *cp)
7704 {
7705 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
7706 	struct cpuid_regs regs;
7707 
7708 	ASSERT(cpuid_checkpass(cp, CPUID_PASS_BASIC));
7709 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
7710 
7711 	if (!(is_x86_feature(x86_featureset, X86FSET_MSR))) {
7712 		return (0);
7713 	}
7714 
7715 	/*
7716 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
7717 	 * capability bit CPUID.6.ECX.3
7718 	 */
7719 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
7720 		return (0);
7721 
7722 	regs.cp_eax = 0x6;
7723 	(void) cpuid_insn(NULL, &regs);
7724 	return (regs.cp_ecx & CPUID_INTC_ECX_PERFBIAS);
7725 }
7726 
7727 /*
7728  * Check support for TSC deadline timer
7729  *
7730  * TSC deadline timer provides a superior software programming
7731  * model over local APIC timer that eliminates "time drifts".
7732  * Instead of specifying a relative time, software specifies an
7733  * absolute time as the target at which the processor should
7734  * generate a timer event.
7735  */
7736 int
7737 cpuid_deadline_tsc_supported(void)
7738 {
7739 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
7740 	struct cpuid_regs regs;
7741 
7742 	ASSERT(cpuid_checkpass(CPU, CPUID_PASS_BASIC));
7743 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
7744 
7745 	switch (cpi->cpi_vendor) {
7746 	case X86_VENDOR_Intel:
7747 		if (cpi->cpi_maxeax >= 1) {
7748 			regs.cp_eax = 1;
7749 			(void) cpuid_insn(NULL, &regs);
7750 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
7751 		} else {
7752 			return (0);
7753 		}
7754 	default:
7755 		return (0);
7756 	}
7757 }
7758 
7759 #if !defined(__xpv)
7760 /*
7761  * Patch in versions of bcopy for high performance Intel Nhm processors
7762  * and later...
7763  */
7764 void
7765 patch_memops(uint_t vendor)
7766 {
7767 	size_t cnt, i;
7768 	caddr_t to, from;
7769 
7770 	if ((vendor == X86_VENDOR_Intel) &&
7771 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
7772 		cnt = &bcopy_patch_end - &bcopy_patch_start;
7773 		to = &bcopy_ck_size;
7774 		from = &bcopy_patch_start;
7775 		for (i = 0; i < cnt; i++) {
7776 			*to++ = *from++;
7777 		}
7778 	}
7779 }
7780 #endif  /*  !__xpv */
7781 
7782 /*
7783  * We're being asked to tell the system how many bits are required to represent
7784  * the various thread and strand IDs. While it's tempting to derive this based
7785  * on the values in cpi_ncore_per_chip and cpi_ncpu_per_chip, that isn't quite
7786  * correct. Instead, this needs to be based on the number of bits that the APIC
7787  * allows for these different configurations. We only update these to a larger
7788  * value if we find one.
7789  */
7790 void
7791 cpuid_get_ext_topo(cpu_t *cpu, uint_t *core_nbits, uint_t *strand_nbits)
7792 {
7793 	struct cpuid_info *cpi;
7794 
7795 	VERIFY(cpuid_checkpass(CPU, CPUID_PASS_BASIC));
7796 	cpi = cpu->cpu_m.mcpu_cpi;
7797 
7798 	if (cpi->cpi_ncore_bits > *core_nbits) {
7799 		*core_nbits = cpi->cpi_ncore_bits;
7800 	}
7801 
7802 	if (cpi->cpi_nthread_bits > *strand_nbits) {
7803 		*strand_nbits = cpi->cpi_nthread_bits;
7804 	}
7805 }
7806 
7807 void
7808 cpuid_pass_ucode(cpu_t *cpu, uchar_t *fset)
7809 {
7810 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
7811 	struct cpuid_regs cp;
7812 
7813 	/*
7814 	 * Reread the CPUID portions that we need for various security
7815 	 * information.
7816 	 */
7817 	if (cpi->cpi_vendor == X86_VENDOR_Intel) {
7818 		/*
7819 		 * Check if we now have leaf 7 available to us.
7820 		 */
7821 		if (cpi->cpi_maxeax < 7) {
7822 			bzero(&cp, sizeof (cp));
7823 			cp.cp_eax = 0;
7824 			cpi->cpi_maxeax = __cpuid_insn(&cp);
7825 			if (cpi->cpi_maxeax < 7)
7826 				return;
7827 		}
7828 
7829 		bzero(&cp, sizeof (cp));
7830 		cp.cp_eax = 7;
7831 		cp.cp_ecx = 0;
7832 		(void) __cpuid_insn(&cp);
7833 		cpi->cpi_std[7] = cp;
7834 	} else if (cpi->cpi_vendor == X86_VENDOR_AMD ||
7835 	    cpi->cpi_vendor == X86_VENDOR_HYGON) {
7836 		/* No xcpuid support */
7837 		if (cpi->cpi_family < 5 ||
7838 		    (cpi->cpi_family == 5 && cpi->cpi_model < 1))
7839 			return;
7840 
7841 		if (cpi->cpi_xmaxeax < CPUID_LEAF_EXT_8) {
7842 			bzero(&cp, sizeof (cp));
7843 			cp.cp_eax = CPUID_LEAF_EXT_0;
7844 			cpi->cpi_xmaxeax = __cpuid_insn(&cp);
7845 			if (cpi->cpi_xmaxeax < CPUID_LEAF_EXT_8) {
7846 				return;
7847 			}
7848 		}
7849 
7850 		/*
7851 		 * Most AMD features are in leaf 8. Automatic IBRS was added in
7852 		 * leaf 0x21. So we also check that.
7853 		 */
7854 		bzero(&cp, sizeof (cp));
7855 		cp.cp_eax = CPUID_LEAF_EXT_8;
7856 		(void) __cpuid_insn(&cp);
7857 		platform_cpuid_mangle(cpi->cpi_vendor, CPUID_LEAF_EXT_8, &cp);
7858 		cpi->cpi_extd[8] = cp;
7859 
7860 		if (cpi->cpi_xmaxeax < CPUID_LEAF_EXT_21) {
7861 			return;
7862 		}
7863 
7864 		bzero(&cp, sizeof (cp));
7865 		cp.cp_eax = CPUID_LEAF_EXT_21;
7866 		(void) __cpuid_insn(&cp);
7867 		platform_cpuid_mangle(cpi->cpi_vendor, CPUID_LEAF_EXT_21, &cp);
7868 		cpi->cpi_extd[0x21] = cp;
7869 	} else {
7870 		/*
7871 		 * Nothing to do here. Return an empty set which has already
7872 		 * been zeroed for us.
7873 		 */
7874 		return;
7875 	}
7876 	cpuid_scan_security(cpu, fset);
7877 }
7878 
7879 /* ARGSUSED */
7880 static int
7881 cpuid_post_ucodeadm_xc(xc_arg_t arg0, xc_arg_t arg1, xc_arg_t arg2)
7882 {
7883 	uchar_t *fset;
7884 	boolean_t first_pass = (boolean_t)arg1;
7885 
7886 	fset = (uchar_t *)(arg0 + sizeof (x86_featureset) * CPU->cpu_id);
7887 	if (first_pass && CPU->cpu_id != 0)
7888 		return (0);
7889 	if (!first_pass && CPU->cpu_id == 0)
7890 		return (0);
7891 	cpuid_pass_ucode(CPU, fset);
7892 
7893 	return (0);
7894 }
7895 
7896 /*
7897  * After a microcode update where the version has changed, then we need to
7898  * rescan CPUID. To do this we check every CPU to make sure that they have the
7899  * same microcode. Then we perform a cross call to all such CPUs. It's the
7900  * caller's job to make sure that no one else can end up doing an update while
7901  * this is going on.
7902  *
7903  * We assume that the system is microcode capable if we're called.
7904  */
7905 void
7906 cpuid_post_ucodeadm(void)
7907 {
7908 	uint32_t rev;
7909 	int i;
7910 	struct cpu *cpu;
7911 	cpuset_t cpuset;
7912 	void *argdata;
7913 	uchar_t *f0;
7914 
7915 	argdata = kmem_zalloc(sizeof (x86_featureset) * NCPU, KM_SLEEP);
7916 
7917 	mutex_enter(&cpu_lock);
7918 	cpu = cpu_get(0);
7919 	rev = cpu->cpu_m.mcpu_ucode_info->cui_rev;
7920 	CPUSET_ONLY(cpuset, 0);
7921 	for (i = 1; i < max_ncpus; i++) {
7922 		if ((cpu = cpu_get(i)) == NULL)
7923 			continue;
7924 
7925 		if (cpu->cpu_m.mcpu_ucode_info->cui_rev != rev) {
7926 			panic("post microcode update CPU %d has differing "
7927 			    "microcode revision (%u) from CPU 0 (%u)",
7928 			    i, cpu->cpu_m.mcpu_ucode_info->cui_rev, rev);
7929 		}
7930 		CPUSET_ADD(cpuset, i);
7931 	}
7932 
7933 	/*
7934 	 * We do the cross calls in two passes. The first pass is only for the
7935 	 * boot CPU. The second pass is for all of the other CPUs. This allows
7936 	 * the boot CPU to go through and change behavior related to patching or
7937 	 * whether or not Enhanced IBRS needs to be enabled and then allow all
7938 	 * other CPUs to follow suit.
7939 	 */
7940 	kpreempt_disable();
7941 	xc_sync((xc_arg_t)argdata, B_TRUE, 0, CPUSET2BV(cpuset),
7942 	    cpuid_post_ucodeadm_xc);
7943 	xc_sync((xc_arg_t)argdata, B_FALSE, 0, CPUSET2BV(cpuset),
7944 	    cpuid_post_ucodeadm_xc);
7945 	kpreempt_enable();
7946 
7947 	/*
7948 	 * OK, now look at each CPU and see if their feature sets are equal.
7949 	 */
7950 	f0 = argdata;
7951 	for (i = 1; i < max_ncpus; i++) {
7952 		uchar_t *fset;
7953 		if (!CPU_IN_SET(cpuset, i))
7954 			continue;
7955 
7956 		fset = (uchar_t *)((uintptr_t)argdata +
7957 		    sizeof (x86_featureset) * i);
7958 
7959 		if (!compare_x86_featureset(f0, fset)) {
7960 			panic("Post microcode update CPU %d has "
7961 			    "differing security feature (%p) set from CPU 0 "
7962 			    "(%p), not appending to feature set", i,
7963 			    (void *)fset, (void *)f0);
7964 		}
7965 	}
7966 
7967 	mutex_exit(&cpu_lock);
7968 
7969 	for (i = 0; i < NUM_X86_FEATURES; i++) {
7970 		cmn_err(CE_CONT, "?post-ucode x86_feature: %s\n",
7971 		    x86_feature_names[i]);
7972 		if (is_x86_feature(f0, i)) {
7973 			add_x86_feature(x86_featureset, i);
7974 		}
7975 	}
7976 	kmem_free(argdata, sizeof (x86_featureset) * NCPU);
7977 }
7978 
7979 typedef void (*cpuid_pass_f)(cpu_t *, void *);
7980 
7981 typedef struct cpuid_pass_def {
7982 	cpuid_pass_t cpd_pass;
7983 	cpuid_pass_f cpd_func;
7984 } cpuid_pass_def_t;
7985 
7986 /*
7987  * See block comment at the top; note that cpuid_pass_ucode is not a pass in the
7988  * normal sense and should not appear here.
7989  */
7990 static const cpuid_pass_def_t cpuid_pass_defs[] = {
7991 	{ CPUID_PASS_PRELUDE, cpuid_pass_prelude },
7992 	{ CPUID_PASS_IDENT, cpuid_pass_ident },
7993 	{ CPUID_PASS_BASIC, cpuid_pass_basic },
7994 	{ CPUID_PASS_EXTENDED, cpuid_pass_extended },
7995 	{ CPUID_PASS_DYNAMIC, cpuid_pass_dynamic },
7996 	{ CPUID_PASS_RESOLVE, cpuid_pass_resolve },
7997 };
7998 
7999 void
8000 cpuid_execpass(cpu_t *cp, cpuid_pass_t pass, void *arg)
8001 {
8002 	VERIFY3S(pass, !=, CPUID_PASS_NONE);
8003 
8004 	if (cp == NULL)
8005 		cp = CPU;
8006 
8007 	/*
8008 	 * Space statically allocated for BSP, ensure pointer is set
8009 	 */
8010 	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_cpi == NULL)
8011 		cp->cpu_m.mcpu_cpi = &cpuid_info0;
8012 
8013 	ASSERT(cpuid_checkpass(cp, pass - 1));
8014 
8015 	for (uint_t i = 0; i < ARRAY_SIZE(cpuid_pass_defs); i++) {
8016 		if (cpuid_pass_defs[i].cpd_pass == pass) {
8017 			cpuid_pass_defs[i].cpd_func(cp, arg);
8018 			cp->cpu_m.mcpu_cpi->cpi_pass = pass;
8019 			return;
8020 		}
8021 	}
8022 
8023 	panic("unable to execute invalid cpuid pass %d on cpu%d\n",
8024 	    pass, cp->cpu_id);
8025 }
8026 
8027 /*
8028  * Extract the processor family from a chiprev.  Processor families are not the
8029  * same as cpuid families; see comments above and in x86_archext.h.
8030  */
8031 x86_processor_family_t
8032 chiprev_family(const x86_chiprev_t cr)
8033 {
8034 	return ((x86_processor_family_t)_X86_CHIPREV_FAMILY(cr));
8035 }
8036 
8037 /*
8038  * A chiprev matches its template if the vendor and family are identical and the
8039  * revision of the chiprev matches one of the bits set in the template.  Callers
8040  * may bitwise-OR together chiprevs of the same vendor and family to form the
8041  * template, or use the _ANY variant.  It is not possible to match chiprevs of
8042  * multiple vendors or processor families with a single call.  Note that this
8043  * function operates on processor families, not cpuid families.
8044  */
8045 boolean_t
8046 chiprev_matches(const x86_chiprev_t cr, const x86_chiprev_t template)
8047 {
8048 	return (_X86_CHIPREV_VENDOR(cr) == _X86_CHIPREV_VENDOR(template) &&
8049 	    _X86_CHIPREV_FAMILY(cr) == _X86_CHIPREV_FAMILY(template) &&
8050 	    (_X86_CHIPREV_REV(cr) & _X86_CHIPREV_REV(template)) != 0);
8051 }
8052 
8053 /*
8054  * A chiprev is at least min if the vendor and family are identical and the
8055  * revision of the chiprev is at least as recent as that of min.  Processor
8056  * families are considered unordered and cannot be compared using this function.
8057  * Note that this function operates on processor families, not cpuid families.
8058  * Use of the _ANY chiprev variant with this function is not useful; it will
8059  * always return B_FALSE if the _ANY variant is supplied as the minimum
8060  * revision.  To determine only whether a chiprev is of a given processor
8061  * family, test the return value of chiprev_family() instead.
8062  */
8063 boolean_t
8064 chiprev_at_least(const x86_chiprev_t cr, const x86_chiprev_t min)
8065 {
8066 	return (_X86_CHIPREV_VENDOR(cr) == _X86_CHIPREV_VENDOR(min) &&
8067 	    _X86_CHIPREV_FAMILY(cr) == _X86_CHIPREV_FAMILY(min) &&
8068 	    _X86_CHIPREV_REV(cr) >= _X86_CHIPREV_REV(min));
8069 }
8070 
8071 /*
8072  * The uarch functions operate in a manner similar to the chiprev functions
8073  * above.  While it is tempting to allow these to operate on microarchitectures
8074  * produced by a specific vendor in an ordered fashion (e.g., ZEN3 is "newer"
8075  * than ZEN2), we elect not to do so because a manufacturer may supply
8076  * processors of multiple different microarchitecture families each of which may
8077  * be internally ordered but unordered with respect to those of other families.
8078  */
8079 x86_uarch_t
8080 uarchrev_uarch(const x86_uarchrev_t ur)
8081 {
8082 	return ((x86_uarch_t)_X86_UARCHREV_UARCH(ur));
8083 }
8084 
8085 boolean_t
8086 uarchrev_matches(const x86_uarchrev_t ur, const x86_uarchrev_t template)
8087 {
8088 	return (_X86_UARCHREV_VENDOR(ur) == _X86_UARCHREV_VENDOR(template) &&
8089 	    _X86_UARCHREV_UARCH(ur) == _X86_UARCHREV_UARCH(template) &&
8090 	    (_X86_UARCHREV_REV(ur) & _X86_UARCHREV_REV(template)) != 0);
8091 }
8092 
8093 boolean_t
8094 uarchrev_at_least(const x86_uarchrev_t ur, const x86_uarchrev_t min)
8095 {
8096 	return (_X86_UARCHREV_VENDOR(ur) == _X86_UARCHREV_VENDOR(min) &&
8097 	    _X86_UARCHREV_UARCH(ur) == _X86_UARCHREV_UARCH(min) &&
8098 	    _X86_UARCHREV_REV(ur) >= _X86_UARCHREV_REV(min));
8099 }
8100 
8101 /*
8102  * Topology cache related information. This is yet another cache interface that
8103  * we're exposing out intended to be used when we have either Intel Leaf 4 or
8104  * AMD Leaf 8x1D (introduced with Zen 1).
8105  */
8106 static boolean_t
8107 cpuid_cache_topo_sup(const struct cpuid_info *cpi)
8108 {
8109 	switch (cpi->cpi_vendor) {
8110 	case X86_VENDOR_Intel:
8111 		if (cpi->cpi_maxeax >= 4) {
8112 			return (B_TRUE);
8113 		}
8114 		break;
8115 	case X86_VENDOR_AMD:
8116 	case X86_VENDOR_HYGON:
8117 		if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_1d &&
8118 		    is_x86_feature(x86_featureset, X86FSET_TOPOEXT)) {
8119 			return (B_TRUE);
8120 		}
8121 		break;
8122 	default:
8123 		break;
8124 	}
8125 
8126 	return (B_FALSE);
8127 }
8128 
8129 int
8130 cpuid_getncaches(struct cpu *cpu, uint32_t *ncache)
8131 {
8132 	const struct cpuid_info *cpi;
8133 
8134 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_DYNAMIC));
8135 	cpi = cpu->cpu_m.mcpu_cpi;
8136 
8137 	if (!cpuid_cache_topo_sup(cpi)) {
8138 		return (ENOTSUP);
8139 	}
8140 
8141 	*ncache = cpi->cpi_cache_leaf_size;
8142 	return (0);
8143 }
8144 
8145 int
8146 cpuid_getcache(struct cpu *cpu, uint32_t cno, x86_cache_t *cache)
8147 {
8148 	const struct cpuid_info *cpi;
8149 	const struct cpuid_regs *cp;
8150 
8151 	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_DYNAMIC));
8152 	cpi = cpu->cpu_m.mcpu_cpi;
8153 
8154 	if (!cpuid_cache_topo_sup(cpi)) {
8155 		return (ENOTSUP);
8156 	}
8157 
8158 	if (cno >= cpi->cpi_cache_leaf_size) {
8159 		return (EINVAL);
8160 	}
8161 
8162 	bzero(cache, sizeof (cache));
8163 	cp = cpi->cpi_cache_leaves[cno];
8164 	switch (CPI_CACHE_TYPE(cp)) {
8165 	case CPI_CACHE_TYPE_DATA:
8166 		cache->xc_type = X86_CACHE_TYPE_DATA;
8167 		break;
8168 	case CPI_CACHE_TYPE_INSTR:
8169 		cache->xc_type = X86_CACHE_TYPE_INST;
8170 		break;
8171 	case CPI_CACHE_TYPE_UNIFIED:
8172 		cache->xc_type = X86_CACHE_TYPE_UNIFIED;
8173 		break;
8174 	case CPI_CACHE_TYPE_DONE:
8175 	default:
8176 		return (EINVAL);
8177 	}
8178 	cache->xc_level = CPI_CACHE_LVL(cp);
8179 	if (CPI_FULL_ASSOC_CACHE(cp) != 0) {
8180 		cache->xc_flags |= X86_CACHE_F_FULL_ASSOC;
8181 	}
8182 	cache->xc_nparts = CPI_CACHE_PARTS(cp) + 1;
8183 	/*
8184 	 * The number of sets is reserved on AMD if the CPU is tagged as fully
8185 	 * associative, where as it is considered valid on Intel.
8186 	 */
8187 	if (cpi->cpi_vendor == X86_VENDOR_AMD &&
8188 	    CPI_FULL_ASSOC_CACHE(cp) != 0) {
8189 		cache->xc_nsets = 1;
8190 	} else {
8191 		cache->xc_nsets = CPI_CACHE_SETS(cp) + 1;
8192 	}
8193 	cache->xc_nways = CPI_CACHE_WAYS(cp) + 1;
8194 	cache->xc_line_size = CPI_CACHE_COH_LN_SZ(cp) + 1;
8195 	cache->xc_size = cache->xc_nparts * cache->xc_nsets * cache->xc_nways *
8196 	    cache->xc_line_size;
8197 	/*
8198 	 * We're looking for the number of bits to cover the number of CPUs that
8199 	 * are being shared. Normally this would be the value - 1, but the CPUID
8200 	 * value is encoded as the actual value minus one, so we don't modify
8201 	 * this at all.
8202 	 */
8203 	cache->xc_apic_shift = highbit(CPI_NTHR_SHR_CACHE(cp));
8204 
8205 	/*
8206 	 * To construct a unique ID we construct a uint64_t that looks as
8207 	 * follows:
8208 	 *
8209 	 * [47:40] cache level
8210 	 * [39:32] CPUID cache type
8211 	 * [31:00] shifted APIC ID
8212 	 *
8213 	 * The shifted APIC ID gives us a guarantee that a given cache entry is
8214 	 * unique within its peers. The other two numbers give us something that
8215 	 * ensures that something is unique within the CPU. If we just had the
8216 	 * APIC ID shifted over by the indicated number of bits we'd end up with
8217 	 * an ID of zero for the L1I, L1D, L2, and L3.
8218 	 *
8219 	 * The format of this ID is private to the system and can change across
8220 	 * a reboot for the time being.
8221 	 */
8222 	cache->xc_id = (uint64_t)cache->xc_level << 40;
8223 	cache->xc_id |= (uint64_t)cache->xc_type << 32;
8224 	cache->xc_id |= (uint64_t)cpi->cpi_apicid >> cache->xc_apic_shift;
8225 
8226 	return (0);
8227 }
8228