/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
38c6609a6br
39c6609a6br#include "opt_kstack_pages.h"
40c6609a6br#include "opt_platform.h"
41c6609a6br
42c6609a6br#include <sys/cdefs.h>
43c6609a6br__FBSDID("$FreeBSD$");
44c6609a6br
45c6609a6br#include <sys/param.h>
46c6609a6br#include <sys/systm.h>
47c6609a6br#include <sys/bus.h>
48c6609a6br#include <sys/cpu.h>
49c6609a6br#include <sys/kernel.h>
50250e158cem#include <sys/ktr.h>
51c6609a6br#include <sys/malloc.h>
52c6609a6br#include <sys/module.h>
53c6609a6br#include <sys/mutex.h>
54c6609a6br#include <sys/proc.h>
55c6609a6br#include <sys/sched.h>
56c6609a6br#include <sys/smp.h>
57c6609a6br
58c6609a6br#include <vm/vm.h>
59c6609a6br#include <vm/pmap.h>
60c6609a6br#include <vm/vm_extern.h>
61c6609a6br#include <vm/vm_kern.h>
62ad0bb33markj#include <vm/vm_map.h>
63c6609a6br
64c6609a6br#include <machine/intr.h>
65c6609a6br#include <machine/smp.h>
6668c30b1br#include <machine/sbi.h>
67c6609a6br
68c6609a6br#ifdef FDT
69c6609a6br#include <dev/ofw/openfirm.h>
70c6609a6br#include <dev/ofw/ofw_cpu.h>
71c6609a6br#endif
72c6609a6br
73c6609a6brboolean_t ofw_cpu_reg(phandle_t node, u_int, cell_t *);
74c6609a6br
75c6609a6bruint32_t __riscv_boot_ap[MAXCPU];
76c6609a6br
77c6609a6brstatic enum {
78c6609a6br	CPUS_UNKNOWN,
79c6609a6br#ifdef FDT
80c6609a6br	CPUS_FDT,
81c6609a6br#endif
82c6609a6br} cpu_enum_method;
83c6609a6br
84c6609a6brstatic device_identify_t riscv64_cpu_identify;
85c6609a6brstatic device_probe_t riscv64_cpu_probe;
86c6609a6brstatic device_attach_t riscv64_cpu_attach;
87c6609a6br
88c6609a6brstatic int ipi_handler(void *);
89c6609a6br
90c6609a6brstruct pcb stoppcbs[MAXCPU];
91c6609a6br
929423805brextern uint32_t boot_hart;
939423805brextern cpuset_t all_harts;
949423805br
95c6609a6br#ifdef INVARIANTS
96c6609a6brstatic uint32_t cpu_reg[MAXCPU][2];
97c6609a6br#endif
98c6609a6brstatic device_t cpu_list[MAXCPU];
99c6609a6br
10031f17c4mhornevoid mpentry(u_long hartid);
101c6609a6brvoid init_secondary(uint64_t);
102c6609a6br
103b18019emarkjstatic struct mtx ap_boot_mtx;
104b18019emarkj
105b18019emarkj/* Stacks for AP initialization, discarded once idle threads are started. */
106b18019emarkjvoid *bootstack;
107b18019emarkjstatic void *bootstacks[MAXCPU];
108b18019emarkj
109b18019emarkj/* Count of started APs, used to synchronize access to bootstack. */
110b18019emarkjstatic volatile int aps_started;
111c6609a6br
112c6609a6br/* Set to 1 once we're ready to let the APs out of the pen. */
113b18019emarkjstatic volatile int aps_ready;
114c6609a6br
115c6609a6br/* Temporary variables for init_secondary()  */
116c6609a6brvoid *dpcpu[MAXCPU - 1];
117c6609a6br
118c6609a6brstatic device_method_t riscv64_cpu_methods[] = {
119c6609a6br	/* Device interface */
120c6609a6br	DEVMETHOD(device_identify,	riscv64_cpu_identify),
121c6609a6br	DEVMETHOD(device_probe,		riscv64_cpu_probe),
122c6609a6br	DEVMETHOD(device_attach,	riscv64_cpu_attach),
123c6609a6br
124c6609a6br	DEVMETHOD_END
125c6609a6br};
126c6609a6br
127c6609a6brstatic devclass_t riscv64_cpu_devclass;
128c6609a6brstatic driver_t riscv64_cpu_driver = {
129c6609a6br	"riscv64_cpu",
130c6609a6br	riscv64_cpu_methods,
131c6609a6br	0
132c6609a6br};
133c6609a6br
134c6609a6brDRIVER_MODULE(riscv64_cpu, cpu, riscv64_cpu_driver, riscv64_cpu_devclass, 0, 0);
135c6609a6br
136c6609a6brstatic void
137c6609a6brriscv64_cpu_identify(driver_t *driver, device_t parent)
138c6609a6br{
139c6609a6br
140c6609a6br	if (device_find_child(parent, "riscv64_cpu", -1) != NULL)
141c6609a6br		return;
142c6609a6br	if (BUS_ADD_CHILD(parent, 0, "riscv64_cpu", -1) == NULL)
143c6609a6br		device_printf(parent, "add child failed\n");
144c6609a6br}
145c6609a6br
146c6609a6brstatic int
147c6609a6brriscv64_cpu_probe(device_t dev)
148c6609a6br{
149c6609a6br	u_int cpuid;
150c6609a6br
151c6609a6br	cpuid = device_get_unit(dev);
152c6609a6br	if (cpuid >= MAXCPU || cpuid > mp_maxid)
153c6609a6br		return (EINVAL);
154c6609a6br
155c6609a6br	device_quiet(dev);
156c6609a6br	return (0);
157c6609a6br}
158c6609a6br
159c6609a6brstatic int
160c6609a6brriscv64_cpu_attach(device_t dev)
161c6609a6br{
162c6609a6br	const uint32_t *reg;
163c6609a6br	size_t reg_size;
164c6609a6br	u_int cpuid;
165c6609a6br	int i;
166c6609a6br
167c6609a6br	cpuid = device_get_unit(dev);
168c6609a6br
169c6609a6br	if (cpuid >= MAXCPU || cpuid > mp_maxid)
170c6609a6br		return (EINVAL);
171c6609a6br	KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));
172c6609a6br
173c6609a6br	reg = cpu_get_cpuid(dev, &reg_size);
174c6609a6br	if (reg == NULL)
175c6609a6br		return (EINVAL);
176c6609a6br
177c6609a6br	if (bootverbose) {
178c6609a6br		device_printf(dev, "register <");
179c6609a6br		for (i = 0; i < reg_size; i++)
180c6609a6br			printf("%s%x", (i == 0) ? "" : " ", reg[i]);
181c6609a6br		printf(">\n");
182c6609a6br	}
183c6609a6br
184c6609a6br	/* Set the device to start it later */
185c6609a6br	cpu_list[cpuid] = dev;
186c6609a6br
187c6609a6br	return (0);
188c6609a6br}
189c6609a6br
190c6609a6brstatic void
191c6609a6brrelease_aps(void *dummy __unused)
192c6609a6br{
1939423805br	cpuset_t mask;
19441017fakp	int i;
195c6609a6br
196c6609a6br	if (mp_ncpus == 1)
197c6609a6br		return;
198c6609a6br
199c6609a6br	/* Setup the IPI handler */
200c6609a6br	riscv_setup_ipihandler(ipi_handler);
201c6609a6br
202c6609a6br	atomic_store_rel_int(&aps_ready, 1);
203c6609a6br
2041e97e20br	/* Wake up the other CPUs */
2059423805br	mask = all_harts;
2069423805br	CPU_CLR(boot_hart, &mask);
2071e97e20br
208c6609a6br	printf("Release APs\n");
209c6609a6br
2109423805br	sbi_send_ipi(mask.__bits);
2119423805br
212c6609a6br	for (i = 0; i < 2000; i++) {
213c390d50kp		if (smp_started)
214c6609a6br			return;
215c6609a6br		DELAY(1000);
216c6609a6br	}
217c6609a6br
218c6609a6br	printf("APs not started\n");
219c6609a6br}
220c6609a6brSYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
221c6609a6br
222c6609a6brvoid
2239423805brinit_secondary(uint64_t hart)
224c6609a6br{
225c6609a6br	struct pcpu *pcpup;
2269423805br	u_int cpuid;
2279423805br
2289423805br	/* Renumber this cpu */
2299423805br	cpuid = hart;
2309423805br	if (cpuid < boot_hart)
2319423805br		cpuid += mp_maxid + 1;
2329423805br	cpuid -= boot_hart;
233c6609a6br
234c6609a6br	/* Setup the pcpu pointer */
2359423805br	pcpup = &__pcpu[cpuid];
23613a454cmhorne	__asm __volatile("mv tp, %0" :: "r"(pcpup));
237c6609a6br
2381e97e20br	/* Workaround: make sure wfi doesn't halt the hart */
2391e97e20br	csr_set(sie, SIE_SSIE);
2401e97e20br	csr_set(sip, SIE_SSIE);
2411e97e20br
242b18019emarkj	/* Signal the BSP and spin until it has released all APs. */
243b18019emarkj	atomic_add_int(&aps_started, 1);
244b18019emarkj	while (!atomic_load_int(&aps_ready))
245c6609a6br		__asm __volatile("wfi");
246c6609a6br
247c6609a6br	/* Initialize curthread */
248c6609a6br	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
249c6609a6br	pcpup->pc_curthread = pcpup->pc_idlethread;
250c6609a6br
251c6609a6br	/*
252c6609a6br	 * Identify current CPU. This is necessary to setup
253c6609a6br	 * affinity registers and to provide support for
254c6609a6br	 * runtime chip identification.
255c6609a6br	 */
256c6609a6br	identify_cpu();
257c6609a6br
258c6609a6br	/* Enable software interrupts */
259c6609a6br	riscv_unmask_ipi();
260c6609a6br
261e9360c3mhorne#ifndef EARLY_AP_STARTUP
262c6609a6br	/* Start per-CPU event timers. */
263c6609a6br	cpu_initclocks_ap();
264e9360c3mhorne#endif
265c6609a6br
266ef85338br	/* Enable external (PLIC) interrupts */
267ef85338br	csr_set(sie, SIE_SEIE);
268ef85338br
269ad0bb33markj	/* Activate process 0's pmap. */
270ad0bb33markj	pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));
271ad0bb33markj
272c6609a6br	mtx_lock_spin(&ap_boot_mtx);
273c6609a6br
274c6609a6br	atomic_add_rel_32(&smp_cpus, 1);
275c6609a6br
276c6609a6br	if (smp_cpus == mp_ncpus) {
277c6609a6br		/* enable IPI's, tlb shootdown, freezes etc */
278c6609a6br		atomic_store_rel_int(&smp_started, 1);
279c6609a6br	}
280c6609a6br
281c6609a6br	mtx_unlock_spin(&ap_boot_mtx);
282c6609a6br
283b18019emarkj	/*
284b18019emarkj	 * Assert that smp_after_idle_runnable condition is reasonable.
285b18019emarkj	 */
286b18019emarkj	MPASS(PCPU_GET(curpcb) == NULL);
287b18019emarkj
288c6609a6br	/* Enter the scheduler */
289c6609a6br	sched_throw(NULL);
290c6609a6br
291c6609a6br	panic("scheduler returned us to init_secondary");
292c6609a6br	/* NOTREACHED */
293c6609a6br}
294c6609a6br
295b18019emarkjstatic void
296b18019emarkjsmp_after_idle_runnable(void *arg __unused)
297b18019emarkj{
298b18019emarkj	struct pcpu *pc;
299b18019emarkj	int cpu;
300b18019emarkj
30131f17c4mhorne	for (cpu = 1; cpu <= mp_maxid; cpu++) {
302b18019emarkj		if (bootstacks[cpu] != NULL) {
303b18019emarkj			pc = pcpu_find(cpu);
304b18019emarkj			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
305b18019emarkj				cpu_spinwait();
306b18019emarkj			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
307b18019emarkj		}
308b18019emarkj	}
309b18019emarkj}
310b18019emarkjSYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
311b18019emarkj    smp_after_idle_runnable, NULL);
312b18019emarkj
313c6609a6brstatic int
314c6609a6bripi_handler(void *arg)
315c6609a6br{
316c6609a6br	u_int ipi_bitmap;
317c6609a6br	u_int cpu, ipi;
318c6609a6br	int bit;
319c6609a6br
32068c30b1br	sbi_clear_ipi();
321c6609a6br
322c6609a6br	cpu = PCPU_GET(cpuid);
323c6609a6br
324c6609a6br	mb();
325c6609a6br
326c6609a6br	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
327c6609a6br	if (ipi_bitmap == 0)
328c6609a6br		return (FILTER_HANDLED);
329c6609a6br
330c6609a6br	while ((bit = ffs(ipi_bitmap))) {
331c6609a6br		bit = (bit - 1);
332c6609a6br		ipi = (1 << bit);
333c6609a6br		ipi_bitmap &= ~ipi;
334c6609a6br
335c6609a6br		mb();
336c6609a6br
337c6609a6br		switch (ipi) {
338c6609a6br		case IPI_AST:
339c6609a6br			CTR0(KTR_SMP, "IPI_AST");
340c6609a6br			break;
341c6609a6br		case IPI_PREEMPT:
342c6609a6br			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
343c6609a6br			sched_preempt(curthread);
344c6609a6br			break;
345c6609a6br		case IPI_RENDEZVOUS:
346c6609a6br			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
347c6609a6br			smp_rendezvous_action();
348c6609a6br			break;
349c6609a6br		case IPI_STOP:
350c6609a6br		case IPI_STOP_HARD:
351c6609a6br			CTR0(KTR_SMP, (ipi == IPI_STOP) ? "IPI_STOP" : "IPI_STOP_HARD");
352c6609a6br			savectx(&stoppcbs[cpu]);
353c6609a6br
354c6609a6br			/* Indicate we are stopped */
355c6609a6br			CPU_SET_ATOMIC(cpu, &stopped_cpus);
356c6609a6br
357c6609a6br			/* Wait for restart */
358c6609a6br			while (!CPU_ISSET(cpu, &started_cpus))
359c6609a6br				cpu_spinwait();
360c6609a6br
361c6609a6br			CPU_CLR_ATOMIC(cpu, &started_cpus);
362c6609a6br			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
363c6609a6br			CTR0(KTR_SMP, "IPI_STOP (restart)");
364c741b8bjhb
365c741b8bjhb			/*
366c741b8bjhb			 * The kernel debugger might have set a breakpoint,
367c741b8bjhb			 * so flush the instruction cache.
368c741b8bjhb			 */
369c741b8bjhb			fence_i();
370c6609a6br			break;
371c6609a6br		case IPI_HARDCLOCK:
372c6609a6br			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
373c6609a6br			hardclockintr();
374c6609a6br			break;
375c6609a6br		default:
376c6609a6br			panic("Unknown IPI %#0x on cpu %d", ipi, curcpu);
377c6609a6br		}
378c6609a6br	}
379c6609a6br
380c6609a6br	return (FILTER_HANDLED);
381c6609a6br}
382c6609a6br
383c6609a6brstruct cpu_group *
384c6609a6brcpu_topo(void)
385c6609a6br{
386c6609a6br
387c6609a6br	return (smp_topo_none());
388c6609a6br}
389c6609a6br
390c6609a6br/* Determine if we running MP machine */
391c6609a6brint
392c6609a6brcpu_mp_probe(void)
393c6609a6br{
394c6609a6br
395c6609a6br	return (mp_ncpus > 1);
396c6609a6br}
397c6609a6br
398c6609a6br#ifdef FDT
399c6609a6brstatic boolean_t
400c6609a6brcpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
401c6609a6br{
402c6609a6br	struct pcpu *pcpup;
40331f17c4mhorne	vm_paddr_t start_addr;
4049423805br	uint64_t hart;
4059423805br	u_int cpuid;
406b18019emarkj	int naps;
40731f17c4mhorne	int error;
408c6609a6br
4099423805br	/* Check if this hart supports MMU. */
4109423805br	if (OF_getproplen(node, "mmu-type") < 0)
411c6609a6br		return (0);
412c6609a6br
413c6609a6br	KASSERT(id < MAXCPU, ("Too many CPUs"));
414c6609a6br
415c6609a6br	KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size"));
416c6609a6br#ifdef INVARIANTS
417c6609a6br	cpu_reg[id][0] = reg[0];
418c6609a6br	if (addr_size == 2)
419c6609a6br		cpu_reg[id][1] = reg[1];
420c6609a6br#endif
421c6609a6br
4229423805br	hart = reg[0];
423c6609a6br	if (addr_size == 2) {
4249423805br		hart <<= 32;
4259423805br		hart |= reg[1];
426c6609a6br	}
427c6609a6br
4289423805br	KASSERT(hart < MAXCPU, ("Too many harts."));
429c6609a6br
4309423805br	/* We are already running on this cpu */
4319423805br	if (hart == boot_hart)
432c6609a6br		return (1);
433c6609a6br
4349423805br	/*
4359423805br	 * Rotate the CPU IDs to put the boot CPU as CPU 0.
4369423805br	 * We keep the other CPUs ordered.
4379423805br	 */
4389423805br	cpuid = hart;
4399423805br	if (cpuid < boot_hart)
4409423805br		cpuid += mp_maxid + 1;
4419423805br	cpuid -= boot_hart;
442c6609a6br
4439423805br	/* Check if we are able to start this cpu */
4449423805br	if (cpuid > mp_maxid)
4459423805br		return (0);
446c6609a6br
44731f17c4mhorne	/*
44831f17c4mhorne	 * Depending on the SBI implementation, APs are waiting either in
44931f17c4mhorne	 * locore.S or to be activated explicitly, via SBI call.
45031f17c4mhorne	 */
45131f17c4mhorne	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0) {
45231f17c4mhorne		start_addr = pmap_kextract((vm_offset_t)mpentry);
45331f17c4mhorne		error = sbi_hsm_hart_start(hart, start_addr, 0);
45431f17c4mhorne		if (error != 0) {
45531f17c4mhorne			mp_ncpus--;
45631f17c4mhorne
45731f17c4mhorne			/* Send a warning to the user and continue. */
45831f17c4mhorne			printf("AP %u (hart %lu) failed to start, error %d\n",
45931f17c4mhorne			    cpuid, hart, error);
46031f17c4mhorne			return (0);
46131f17c4mhorne		}
46231f17c4mhorne	}
46331f17c4mhorne
4649423805br	pcpup = &__pcpu[cpuid];
4659423805br	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
4669423805br	pcpup->pc_hart = hart;
467c6609a6br
4689423805br	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
4699423805br	dpcpu_init(dpcpu[cpuid - 1], cpuid);
4709423805br
471b18019emarkj	bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
472b18019emarkj
473b18019emarkj	naps = atomic_load_int(&aps_started);
474b18019emarkj	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;
475b18019emarkj
4769423805br	printf("Starting CPU %u (hart %lx)\n", cpuid, hart);
477b18019emarkj	atomic_store_32(&__riscv_boot_ap[hart], 1);
478b18019emarkj
479b18019emarkj	/* Wait for the AP to switch to its boot stack. */
480b18019emarkj	while (atomic_load_int(&aps_started) < naps + 1)
481b18019emarkj		cpu_spinwait();
4829423805br
4839423805br	CPU_SET(cpuid, &all_cpus);
4849423805br	CPU_SET(hart, &all_harts);
485c6609a6br
486c6609a6br	return (1);
487c6609a6br}
488c6609a6br#endif
489c6609a6br
490c6609a6br/* Initialize and fire up non-boot processors */
491c6609a6brvoid
492c6609a6brcpu_mp_start(void)
493c6609a6br{
494c6609a6br
495c6609a6br	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
496c6609a6br
497c6609a6br	CPU_SET(0, &all_cpus);
4989423805br	CPU_SET(boot_hart, &all_harts);
499c6609a6br
500c6609a6br	switch(cpu_enum_method) {
501c6609a6br#ifdef FDT
502c6609a6br	case CPUS_FDT:
503c6609a6br		ofw_cpu_early_foreach(cpu_init_fdt, true);
504c6609a6br		break;
505c6609a6br#endif
506c6609a6br	case CPUS_UNKNOWN:
507c6609a6br		break;
508c6609a6br	}
509c6609a6br}
510c6609a6br
511c6609a6br/* Introduce rest of cores to the world */
512c6609a6brvoid
513c6609a6brcpu_mp_announce(void)
514c6609a6br{
515c6609a6br}
516c6609a6br
5179423805brstatic boolean_t
5189423805brcpu_check_mmu(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
5199423805br{
5209423805br
5219423805br	/* Check if this hart supports MMU. */
5229423805br	if (OF_getproplen(node, "mmu-type") < 0)
5239423805br		return (0);
5249423805br
5259423805br	return (1);
5269423805br}
5279423805br
528c6609a6brvoid
529c6609a6brcpu_mp_setmaxid(void)
530c6609a6br{
531c6609a6br#ifdef FDT
532c6609a6br	int cores;
533c6609a6br
5349423805br	cores = ofw_cpu_early_foreach(cpu_check_mmu, true);
535c6609a6br	if (cores > 0) {
536c6609a6br		cores = MIN(cores, MAXCPU);
537c6609a6br		if (bootverbose)
538c6609a6br			printf("Found %d CPUs in the device tree\n", cores);
539c6609a6br		mp_ncpus = cores;
540c6609a6br		mp_maxid = cores - 1;
541c6609a6br		cpu_enum_method = CPUS_FDT;
542c6609a6br		return;
543c6609a6br	}
544c6609a6br#endif
545c6609a6br
546c6609a6br	if (bootverbose)
547c6609a6br		printf("No CPU data, limiting to 1 core\n");
548c6609a6br	mp_ncpus = 1;
549c6609a6br	mp_maxid = 0;
550c6609a6br}
551